Coverage Report

Created: 2025-06-15 00:57

/src/cmp_tool/lib/icu_compress/cmp_icu.c
Line
Count
Source (jump to first uncovered line)
1
/**
2
 * @file   cmp_icu.c
3
 * @author Dominik Loidolt (dominik.loidolt@univie.ac.at)
4
 * @date   2020
5
 *
6
 * @copyright GPLv2
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * @brief software compression library
17
 * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001
18
 */
19
20
21
#include <stdint.h>
22
#include <string.h>
23
#include <limits.h>
24
25
#include "../common/byteorder.h"
26
#include "../common/compiler.h"
27
#include "../common/cmp_debug.h"
28
#include "../common/cmp_data_types.h"
29
#include "../common/cmp_support.h"
30
#include "../common/cmp_cal_up_model.h"
31
#include "../common/cmp_max_used_bits.h"
32
#include "../common/cmp_entity.h"
33
#include "../common/cmp_error.h"
34
#include "../common/cmp_error_list.h"
35
#include "../common/leon_inttypes.h"
36
#include "cmp_chunk_type.h"
37
38
#include "../cmp_icu.h"
39
#include "../cmp_chunk.h"
40
41
42
/**
 * @brief default implementation of the get_timestamp() function
 *
 * Placeholder used until compress_chunk_init() installs a real
 * timestamp provider.
 *
 * @returns 0
 */

static uint64_t default_get_timestamp(void)
{
	return 0;
}
52
53
54
/**
 * @brief function pointer to a function returning a current PLATO timestamp
 *  initialised with the compress_chunk_init() function
 *
 * Defaults to default_get_timestamp() (which always returns 0) until a
 * real provider is installed.
 */

static uint64_t (*get_timestamp)(void) = default_get_timestamp;


/**
 * @brief holding the version_identifier for the compression header
 *  initialised with the compress_chunk_init() function
 *
 * File-scope static, so it is zero until compress_chunk_init() sets it.
 */

static uint32_t version_identifier;
68
69
70
/**
 * @brief structure to hold a setup to encode a value
 *
 * Bundles the code-word generator, the escape-symbol strategy and the
 * output-stream bounds so a single pointer can be threaded through the
 * per-value encoding functions.
 */

struct encoder_setup {
	uint32_t (*generate_cw_f)(uint32_t value, uint32_t encoder_par1,
				  uint32_t encoder_par2, uint32_t *cw); /**< function pointer to a code word encoder (rice_encoder or golomb_encoder) */
	uint32_t (*encode_method_f)(uint32_t data, uint32_t model, uint32_t stream_len,
				    const struct encoder_setup *setup); /**< pointer to the encoding function (zero or multi escape mechanism) */
	uint32_t *bitstream_adr; /**< start address of the compressed data bitstream */
	uint32_t max_stream_len; /**< maximum length of the bitstream in bits */
	uint32_t encoder_par1;   /**< encoding parameter 1 (Golomb parameter) */
	uint32_t encoder_par2;   /**< encoding parameter 2 (ilog_2 of parameter 1) */
	uint32_t spillover_par;  /**< outlier parameter */
	uint32_t lossy_par;      /**< lossy compression parameter */
	uint32_t max_data_bits;  /**< how many bits are needed to represent the highest possible value */
};
87
88
89
/**
 * @brief map a signed value into a positive value range
 *
 * Positive inputs land on even numbers, negative inputs on odd numbers,
 * so small magnitudes of either sign get small codes.
 *
 * @param value_to_map  signed value to map
 * @param max_data_bits how many bits are needed to represent the
 *      highest possible value
 *
 * @returns the positive mapped value
 */

static uint32_t map_to_pos(uint32_t value_to_map, unsigned int max_data_bits)
{
	uint32_t const used_mask = ~0U >> (32 - max_data_bits); /* keep only the used bits */
	uint32_t v = value_to_map & used_mask;

	if (v >> (max_data_bits - 1)) {
		/* sign bit set: sign-extend to a full 32-bit negative value,
		 * then map negative values onto the odd numbers */
		v |= ~used_mask;
		return (-v) * 2 - 1; /* possible integer overflow is intended */
	}

	/* non-negative values map onto the even numbers */
	return v * 2; /* possible integer overflow is intended */
}
116
117
118
/**
 * @brief put the value of up to 32 bits into a big-endian bitstream
 *
 * @param value     the value to put into the bitstream
 * @param n_bits    number of bits to put into the bitstream
 * @param bit_offset    bit index where the bits will be put, seen from
 *        the very beginning of the bitstream
 * @param bitstream_adr   this is the pointer to the beginning of the
 *        bitstream (can be NULL)
 * @param max_stream_len  maximum length of the bitstream in *bits*; is
 *        ignored if bitstream_adr is NULL
 *
 * @returns the length of the generated bitstream in bits on success or an error
 *          code (which can be tested with cmp_is_error()) in the event of an
 *          incorrect input or if the bitstream buffer is too small to put the
 *          value in the bitstream.
 */

static uint32_t put_n_bits32(uint32_t value, unsigned int n_bits, uint32_t bit_offset,
			     uint32_t *bitstream_adr, unsigned int max_stream_len)
{
	/*
	 *                               UNSEGMENTED
	 * |-----------|XXXXXX|---------------|--------------------------------|
	 * |-bits_left-|n_bits|-------------------bits_right-------------------|
	 * ^
	 * local_adr
	 *                               SEGMENTED
	 * |-----------------------------|XXX|XXX|-----------------------------|
	 * |----------bits_left----------|n_bits-|---------bits_right----------|
	 */
	uint32_t const bits_left = bit_offset & 0x1F; /* bit position inside the current 32-bit word */
	/* measured against a 64-bit (two word) window so it cannot underflow;
	 * bits_right < 32 means the value spans two words (SEGMENTED case) */
	uint32_t const bits_right = 64 - bits_left - n_bits;
	uint32_t const shift_left = 32 - n_bits;
	uint32_t const stream_len = n_bits + bit_offset; /* no check for overflow */
	uint32_t *local_adr;
	uint32_t mask, tmp;

	/* Leave in case of erroneous input */
	RETURN_ERROR_IF((int)shift_left < 0, INT_DECODER, "cannot insert more than 32 bits into the bit stream");  /* check n_bits <= 32 */

	if (n_bits == 0)
		return stream_len;

	if (!bitstream_adr)  /* Do we need to write data to the bitstream? */
		return stream_len; /* dry run: only the resulting length is reported */

	/* Check if the bitstream buffer is large enough */
	if (stream_len > max_stream_len)
		return CMP_ERROR(SMALL_BUFFER);

	/* word that contains bit index bit_offset */
	local_adr = bitstream_adr + (bit_offset >> 5);

	/* clear the destination with inverse mask */
	mask = (0XFFFFFFFFU << shift_left) >> bits_left;
	tmp = be32_to_cpu(*local_adr) & ~mask;

	/* put (the first part of) the value into the bitstream */
	tmp |= (value << shift_left) >> bits_left;
	*local_adr = cpu_to_be32(tmp);

	/* Do we need to split the value over two words (SEGMENTED case) */
	if (bits_right < 32) {
		local_adr++;  /* adjust address */

		/* clear the destination */
		mask = 0XFFFFFFFFU << bits_right;
		tmp = be32_to_cpu(*local_adr) & ~mask;

		/* put the 2nd part of the value into the bitstream */
		tmp |= value << bits_right;
		*local_adr = cpu_to_be32(tmp);
	}
	return stream_len;
}
193
194
195
/**
 * @brief forms the codeword according to the Rice code
 *
 * @param value   value to be encoded (must be smaller or equal than cmp_ima_max_spill(m))
 * @param m   Golomb parameter, only m's which are a power of 2 are allowed
 *      maximum allowed Golomb parameter is 0x80000000
 * @param log2_m  Rice parameter, is ilog_2(m) calculate outside function
 *      for better performance
 * @param cw    address where the code word is stored
 *
 * @warning there is no check of the validity of the input parameters!
 * @returns the length of the formed code word in bits; the code word is invalid
 *  if the return value is greater than 32
 */

static uint32_t rice_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
			     uint32_t *cw)
{
	uint32_t const quotient = value >> log2_m;        /* value / m */
	uint32_t const remainder = value & (m - 1);       /* value % m */
	uint32_t const rem_len = log2_m + 1;              /* remainder bits (+1 for the 0 ending the unary part) */
	uint32_t const unary = (1U << quotient) - 1;      /* quotient in unary, without the ending zero */

	/*
	 * If log2_m = 31 then rem_len = 32 and an unmasked shift would be
	 * undefined behavior. A valid (<= 32 bit) code then only exists for
	 * quotient = 0, unary = 0, so masking the shift amount (& 0x1FU) is
	 * safe and preserves the result.
	 */
	*cw = (unary << (rem_len & 0x1FU)) | remainder;

	return rem_len + quotient; /* total codeword length */
}
229
230
231
/**
 * @brief forms a codeword according to the Golomb code
 *
 * @param value   value to be encoded (must be smaller or equal than cmp_ima_max_spill(m))
 * @param m   Golomb parameter (have to be bigger than 0)
 * @param log2_m  is ilog_2(m) calculate outside function for better performance
 * @param cw    address where the code word is stored
 *
 * @warning there is no check of the validity of the input parameters!
 * @returns the length of the formed code word in bits; the code word is invalid
 *  if the return value is greater than 32
 */

static uint32_t golomb_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
			       uint32_t *cw)
{
	uint32_t const cutoff = (0x2U << log2_m) - m; /* number of members in group 0 */
	uint32_t len = log2_m + 1;                    /* codeword length in group 0 */

	if (value >= cutoff) { /* groups 1, 2, ... */
		uint32_t const shift_mask = 0x1FU; /* masks shift amounts to avoid UB */
		uint32_t const group = (value - cutoff) / m;        /* group of equal cw length */
		uint32_t const member = (value - cutoff) - group * m; /* position inside the group */
		uint32_t const unary = (1U << (group & shift_mask)) - 1; /* left (unary) part */

		/* unary prefix, then base codeword plus group member */
		*cw = (unary << ((len + 1) & shift_mask)) + ((cutoff << 1) + member);
		len += 1 + group;
	} else { /* group 0: value encodes itself */
		*cw = value;
	}

	return len;
}
265
266
267
/**
268
 * @brief generate a code word without an outlier mechanism and put it in the
269
 *  bitstream
270
 *
271
 * @param value   value to encode in the bitstream
272
 * @param stream_len  length of the bitstream in bits
273
 * @param setup   pointer to the encoder setup
274
 *
275
 * @returns the bit length of the bitstream on success or an error code if it
276
 *  fails (which can be tested with cmp_is_error())
277
 */
278
279
static uint32_t encode_normal(uint32_t value, uint32_t stream_len,
280
            const struct encoder_setup *setup)
281
56.2k
{
282
56.2k
  uint32_t code_word, cw_len;
283
284
56.2k
  cw_len = setup->generate_cw_f(value, setup->encoder_par1,
285
56.2k
              setup->encoder_par2, &code_word);
286
287
56.2k
  return put_n_bits32(code_word, cw_len, stream_len, setup->bitstream_adr,
288
56.2k
          setup->max_stream_len);
289
56.2k
}
290
291
292
/**
293
 * @brief subtracts the model from the data, encodes the result and puts it into
294
 *  bitstream, for encoding outlier use the zero escape symbol mechanism
295
 *
296
 * @param data    data to encode
297
 * @param model   model of the data (0 if not used)
298
 * @param stream_len  length of the bitstream in bits
299
 * @param setup   pointer to the encoder setup
300
 *
301
 * @returns the bit length of the bitstream on success or an error code if it
302
 *  fails (which can be tested with cmp_is_error())
303
 *
304
 * @note no check if the data or model are in the allowed range
305
 * @note no check if the setup->spillover_par is in the allowed range
306
 */
307
308
static uint32_t encode_value_zero(uint32_t data, uint32_t model, uint32_t stream_len,
309
          const struct encoder_setup *setup)
310
30.2k
{
311
30.2k
  data -= model; /* possible underflow is intended */
312
313
30.2k
  data = map_to_pos(data, setup->max_data_bits);
314
315
  /* For performance reasons, we check to see if there is an outlier
316
   * before adding one, rather than the other way around:
317
   * data++;
318
   * if (data < setup->spillover_par && data != 0)
319
   *  return ...
320
   */
321
30.2k
  if (data < (setup->spillover_par - 1)) { /* detect non-outlier */
322
6.57k
    data++; /* add 1 to every value so we can use 0 as the escape symbol */
323
6.57k
    return encode_normal(data, stream_len, setup);
324
6.57k
  }
325
326
23.6k
  data++; /* add 1 to every value so we can use 0 as the escape symbol */
327
328
  /* use zero as escape symbol */
329
23.6k
  stream_len = encode_normal(0, stream_len, setup);
330
23.6k
  if (cmp_is_error(stream_len))
331
18
    return stream_len;
332
333
  /* put the data unencoded in the bitstream */
334
23.6k
  stream_len = put_n_bits32(data, setup->max_data_bits, stream_len,
335
23.6k
          setup->bitstream_adr, setup->max_stream_len);
336
23.6k
  return stream_len;
337
23.6k
}
338
339
340
/**
 * @brief subtract the model from the data, encode the result and puts it into
 *  bitstream, for encoding outlier use the multi escape symbol mechanism
 *
 * @param data    data to encode
 * @param model   model of the data (0 if not used)
 * @param stream_len  length of the bitstream in bits
 * @param setup   pointer to the encoder setup
 *
 * @returns the bit length of the bitstream on success or an error code if it
 *  fails (which can be tested with cmp_is_error())
 *
 * @note no check if the data or model are in the allowed range
 * @note no check if the setup->spillover_par is in the allowed range
 */

static uint32_t encode_value_multi(uint32_t data, uint32_t model, uint32_t stream_len,
				   const struct encoder_setup *setup)
{
	uint32_t unencoded_data;
	unsigned int unencoded_data_len;
	uint32_t escape_sym, escape_sym_offset;

	data -= model; /* possible underflow is intended */

	data = map_to_pos(data, setup->max_data_bits);

	if (data < setup->spillover_par) /* detect non-outlier */
		return  encode_normal(data, stream_len, setup);

	/*
	 * In this mode we put the difference between the data and the spillover
	 * threshold value (unencoded_data) after an encoded escape symbol, which
	 * indicates that the next codeword is unencoded.
	 * We use different escape symbols depending on the size of the needed
	 * bit of unencoded data:
	 * 0, 1, 2 bits needed for unencoded data -> escape symbol is spillover_par + 0
	 * 3, 4 bits needed for unencoded data -> escape symbol is spillover_par + 1
	 * 5, 6 bits needed for unencoded data -> escape symbol is spillover_par + 2
	 * and so on
	 */
	unencoded_data = data - setup->spillover_par;

	if (!unencoded_data) /* catch __builtin_clz(0) because the result is undefined.*/
		escape_sym_offset = 0;
	else
		/* floor(ilog2(unencoded_data) / 2): two bit-widths share one escape symbol */
		escape_sym_offset = (31U - (uint32_t)__builtin_clz(unencoded_data)) >> 1;

	escape_sym = setup->spillover_par + escape_sym_offset;
	unencoded_data_len = (escape_sym_offset + 1U) << 1; /* always an even number of bits */

	/* put the escape symbol in the bitstream */
	stream_len = encode_normal(escape_sym, stream_len, setup);
	if (cmp_is_error(stream_len))
		return stream_len;

	/* put the unencoded data in the bitstream */
	stream_len = put_n_bits32(unencoded_data, unencoded_data_len, stream_len,
				  setup->bitstream_adr, setup->max_stream_len);
	return stream_len;
}
401
402
403
/**
404
 * @brief encodes the data with the model and the given setup and put it into
405
 *  the bitstream
406
 *
407
 * @param data    data to encode
408
 * @param model   model of the data (0 if not used)
409
 * @param stream_len  length of the bitstream in bits
410
 * @param setup   pointer to the encoder setup
411
 *
412
 * @returns the bit length of the bitstream on success or an error code if it
413
 *  fails (which can be tested with cmp_is_error())
414
 */
415
416
static uint32_t encode_value(uint32_t data, uint32_t model, uint32_t stream_len,
417
           const struct encoder_setup *setup)
418
56.2k
{
419
56.2k
  uint32_t const mask = ~(0xFFFFFFFFU >> (32-setup->max_data_bits));
420
421
  /* lossy rounding of the data if lossy_par > 0 */
422
56.2k
  data = round_fwd(data, setup->lossy_par);
423
56.2k
  model = round_fwd(model, setup->lossy_par);
424
425
56.2k
  RETURN_ERROR_IF(data & mask || model & mask, DATA_VALUE_TOO_LARGE, "");
426
427
56.2k
  return setup->encode_method_f(data, model, stream_len, setup);
428
56.2k
}
429
430
431
/**
 * @brief calculate the maximum length of the bitstream in bits
 * @note we round down to the next 4-byte allied address because we access the
 *  cmp_buffer in uint32_t words
 *
 * @param stream_size size of the bitstream in bytes
 *
 * @returns buffer size in bits
 */

static uint32_t cmp_stream_size_to_bits(uint32_t stream_size)
{
	/* drop the trailing partial word, then convert bytes to bits */
	uint32_t const aligned_bytes = stream_size - (stream_size % 4);

	return aligned_bytes * 8;
}
445
446
447
/**
448
 * @brief configure an encoder setup structure to have a setup to encode a value
449
 *
450
 * @param setup   pointer to the encoder setup
451
 * @param cmp_par compression parameter
452
 * @param spillover spillover_par parameter
453
 * @param lossy_par lossy compression parameter
454
 * @param max_data_bits how many bits are needed to represent the highest possible value
455
 * @param cfg   pointer to the compression configuration structure
456
 *
457
 * @warning input parameters are not checked for validity
458
 */
459
460
static void configure_encoder_setup(struct encoder_setup *setup,
461
            uint32_t cmp_par, uint32_t spillover,
462
            uint32_t lossy_par, uint32_t max_data_bits,
463
            const struct cmp_cfg *cfg)
464
3.21k
{
465
3.21k
  memset(setup, 0, sizeof(struct encoder_setup));
466
467
3.21k
  setup->encoder_par1 = cmp_par;
468
3.21k
  setup->max_data_bits = max_data_bits;
469
3.21k
  setup->lossy_par = lossy_par;
470
3.21k
  setup->bitstream_adr = cfg->dst;
471
3.21k
  setup->max_stream_len = cmp_stream_size_to_bits(cfg->stream_size);
472
3.21k
  setup->encoder_par2 = ilog_2(cmp_par);
473
3.21k
  setup->spillover_par = spillover;
474
475
  /* for encoder_par1 which is a power of two we can use the faster rice_encoder */
476
3.21k
  if (is_a_pow_of_2(setup->encoder_par1))
477
483
    setup->generate_cw_f = &rice_encoder;
478
2.73k
  else
479
2.73k
    setup->generate_cw_f = &golomb_encoder;
480
481
  /* CMP_MODE_RAW is already handled before */
482
3.21k
  if (cfg->cmp_mode == CMP_MODE_MODEL_ZERO ||
483
3.21k
      cfg->cmp_mode == CMP_MODE_DIFF_ZERO)
484
1.83k
    setup->encode_method_f = &encode_value_zero;
485
1.38k
  else
486
1.38k
    setup->encode_method_f = &encode_value_multi;
487
3.21k
}
488
489
490
/**
 * @brief compress imagette data
 *
 * @param cfg   pointer to the compression configuration structure
 * @param stream_len  already used length of the bitstream in bits
 *
 * @returns the bit length of the bitstream on success or an error code if it
 *  fails (which can be tested with cmp_is_error())
 *
 * NOTE(review): the loop exit test `i >= cfg->samples-1` wraps if
 * cfg->samples == 0 — presumably callers validate samples > 0; confirm.
 */

static uint32_t compress_imagette(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;
	struct encoder_setup setup;
	uint32_t max_data_bits;

	const uint16_t *data_buf = cfg->src;
	const uint16_t *model_buf = cfg->model_buf;
	uint16_t model = 0;
	/* in non-model modes the "model" of sample i+1 is sample i (differencing) */
	const uint16_t *next_model_p = data_buf;
	uint16_t *up_model_buf = NULL;

	if (model_mode_is_used(cfg->cmp_mode)) {
		model = get_unaligned(&model_buf[0]);
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	}

	/* pick the used-bit width for the imagette flavour being compressed */
	if (cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE ||
	    cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE) {
		max_data_bits = MAX_USED_BITS.fc_imagette;
	} else if (cfg->data_type == DATA_TYPE_SAT_IMAGETTE ||
		   cfg->data_type == DATA_TYPE_SAT_IMAGETTE_ADAPTIVE) {
		max_data_bits = MAX_USED_BITS.saturated_imagette;
	} else { /* DATA_TYPE_IMAGETTE, DATA_TYPE_IMAGETTE_ADAPTIVE */
		max_data_bits = MAX_USED_BITS.nc_imagette;
	}

	configure_encoder_setup(&setup, cfg->cmp_par_imagette,
				cfg->spill_imagette, cfg->round, max_data_bits, cfg);

	for (i = 0;; i++) {
		/* get_unaligned: the sample buffers may not be 16-bit aligned */
		stream_len = encode_value(get_unaligned(&data_buf[i]),
					  model, stream_len, &setup);
		if (cmp_is_error(stream_len))
			break;

		/* model mode: also maintain the updated model for the next pass */
		if (up_model_buf) {
			uint16_t data = get_unaligned(&data_buf[i]);
			up_model_buf[i] = cmp_up_model(data, model, cfg->model_value,
						       setup.lossy_par);
		}
		if (i >= cfg->samples-1)
			break;

		model = get_unaligned(&next_model_p[i]);
	}
	return stream_len;
}
549
550
551
/**
 * @brief compress short normal light flux (S_FX) data
 *
 * Each sample holds an exp_flags and an fx field; each field is encoded
 * with its own encoder setup.
 *
 * @param cfg   pointer to the compression configuration structure
 * @param stream_len  already used length of the bitstream in bits
 *
 * @returns the bit length of the bitstream on success or an error code if it
 *  fails (which can be tested with cmp_is_error())
 */

static uint32_t compress_s_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct s_fx *data_buf = cfg->src;
	const struct s_fx *model_buf = cfg->model_buf;
	struct s_fx *up_model_buf = NULL;
	const struct s_fx *next_model_p;
	struct s_fx model;
	struct encoder_setup setup_exp_flag, setup_fx;

	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		/* non-model mode: zero model; sample i serves as "model" for i+1 */
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.s_fx, cfg);

	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;

		/* model mode: maintain the updated model per field */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
						 cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
						cfg->model_value, setup_fx.lossy_par);
		}

		/* NOTE(review): wraps if cfg->samples == 0 — presumably
		 * validated by the caller; confirm */
		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
610
611
612
/**
 * @brief compress S_FX_EFX data
 *
 * Like compress_s_fx() with an additional efx field per sample, encoded
 * with its own setup.
 *
 * @param cfg   pointer to the compression configuration structure
 * @param stream_len  already used length of the bitstream in bits
 *
 * @returns the bit length of the bitstream on success or an error code if it
 *  fails (which can be tested with cmp_is_error())
 */

static uint32_t compress_s_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct s_fx_efx *data_buf = cfg->src;
	const struct s_fx_efx *model_buf = cfg->model_buf;
	struct s_fx_efx *up_model_buf = NULL;
	const struct s_fx_efx *next_model_p;
	struct s_fx_efx model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_efx;

	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		/* non-model mode: zero model; sample i serves as "model" for i+1 */
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.s_fx, cfg);
	configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
				cfg->round, MAX_USED_BITS.s_efx, cfg);

	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].efx, model.efx,
					  stream_len, &setup_efx);
		if (cmp_is_error(stream_len))
			/* returns directly instead of `break` like the other error
			 * paths — equivalent, since the loop exit also returns
			 * stream_len, but inconsistent style */
			return stream_len;

		/* model mode: maintain the updated model per field */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
				cfg->model_value, setup_efx.lossy_par);
		}

		/* NOTE(review): wraps if cfg->samples == 0 — presumably
		 * validated by the caller; confirm */
		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
679
680
681
/**
 * @brief compress S_FX_NCOB data
 *
 * Like compress_s_fx() with additional ncob_x/ncob_y centre-of-brightness
 * fields; both NCOB components share one encoder setup.
 *
 * @param cfg   pointer to the compression configuration structure
 * @param stream_len  already used length of the bitstream in bits
 *
 * @returns the bit length of the bitstream on success or an error code if it
 *  fails (which can be tested with cmp_is_error())
 */

static uint32_t compress_s_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct s_fx_ncob *data_buf = cfg->src;
	const struct s_fx_ncob *model_buf = cfg->model_buf;
	struct s_fx_ncob *up_model_buf = NULL;
	const struct s_fx_ncob *next_model_p;
	struct s_fx_ncob model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_ncob;

	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		/* non-model mode: zero model; sample i serves as "model" for i+1 */
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.s_fx, cfg);
	configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
				cfg->round, MAX_USED_BITS.s_ncob, cfg);

	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;

		/* model mode: maintain the updated model per field */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
				cfg->model_value, setup_ncob.lossy_par);
		}

		/* NOTE(review): wraps if cfg->samples == 0 — presumably
		 * validated by the caller; confirm */
		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
754
755
756
/**
757
 * @brief compress S_FX_EFX_NCOB_ECOB data
758
 *
759
 * @param cfg   pointer to the compression configuration structure
760
 * @param stream_len  already used length of the bitstream in bits
761
 *
762
 * @returns the bit length of the bitstream on success or an error code if it
763
 *  fails (which can be tested with cmp_is_error())
764
 */
765
766
static uint32_t compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct s_fx_efx_ncob_ecob *data_buf = cfg->src;
	const struct s_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
	struct s_fx_efx_ncob_ecob *up_model_buf = NULL;
	const struct s_fx_efx_ncob_ecob *next_model_p;
	struct s_fx_efx_ncob_ecob model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
			      setup_ecob;

	/*
	 * model mode: the first model sample seeds the running model and the
	 * updated model is written back to cfg->updated_model_buf; otherwise
	 * the previously processed data sample serves as the model (the model
	 * of sample 0 is all zeros)
	 */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	/* one encoder setup per compression parameter; setup_ncob is shared
	 * by ncob_x/ncob_y and setup_ecob by ecob_x/ecob_y */
	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.s_fx, cfg);
	configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
				cfg->round, MAX_USED_BITS.s_ncob, cfg);
	configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
				cfg->round, MAX_USED_BITS.s_efx, cfg);
	configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
				cfg->round, MAX_USED_BITS.s_ecob, cfg);

	/* NOTE: the loop body runs at least once; callers are expected to
	 * ensure cfg->samples > 0 (see compress_data_internal). On the first
	 * encoding error stream_len holds the error code and the loop exits. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].efx, model.efx,
					  stream_len, &setup_efx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
					  stream_len, &setup_ecob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
					  stream_len, &setup_ecob);
		if (cmp_is_error(stream_len))
			break;

		/* store the updated model values for the next compression cycle */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
				cfg->model_value, setup_efx.lossy_par);
			up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
				cfg->model_value, setup_ecob.lossy_par);
			up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
				cfg->model_value, setup_ecob.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		/* advance the model to the next sample */
		model = next_model_p[i];
	}
	return stream_len;
}
852
853
854
/**
855
 * @brief compress L_FX data
856
 *
857
 * @param cfg   pointer to the compression configuration structure
858
 * @param stream_len  already used length of the bitstream in bits
859
 *
860
 * @returns the bit length of the bitstream on success or an error code if it
861
 *  fails (which can be tested with cmp_is_error())
862
 */
863
864
static uint32_t compress_l_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct l_fx *data_buf = cfg->src;
	const struct l_fx *model_buf = cfg->model_buf;
	struct l_fx *up_model_buf = NULL;
	const struct l_fx *next_model_p;
	struct l_fx model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_fx_var;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.l_fx, cfg);
	configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * The loop is left on the first encoding error. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
					  stream_len, &setup_fx_var);
		if (cmp_is_error(stream_len))
			break;

		if (up_model_buf) {
			/* exp_flags uses the 32-bit model update variant here,
			 * unlike the 16-bit one in the S_FX paths — presumably
			 * because the L_FX exp_flags field is wider; confirm
			 * against the l_fx data type definition */
			up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
				cfg->model_value, setup_fx_var.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
921
922
923
/**
924
 * @brief compress L_FX_EFX data
925
 *
926
 * @param cfg   pointer to the compression configuration structure
927
 * @param stream_len  already used length of the bitstream in bits
928
 *
929
 * @returns the bit length of the bitstream on success or an error code if it
930
 *  fails (which can be tested with cmp_is_error())
931
 */
932
933
static uint32_t compress_l_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct l_fx_efx *data_buf = cfg->src;
	const struct l_fx_efx *model_buf = cfg->model_buf;
	struct l_fx_efx *up_model_buf = NULL;
	const struct l_fx_efx *next_model_p;
	struct l_fx_efx model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_efx, setup_fx_var;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.l_fx, cfg);
	configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
				cfg->round, MAX_USED_BITS.l_efx, cfg);
	configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * The loop is left on the first encoding error. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].efx, model.efx,
					  stream_len, &setup_efx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
					  stream_len, &setup_fx_var);
		if (cmp_is_error(stream_len))
			break;

		/* store the updated model for the next compression cycle;
		 * exp_flags uses the 32-bit update variant (wider field) */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
				cfg->model_value, setup_efx.lossy_par);
			up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
				cfg->model_value, setup_fx_var.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
998
999
1000
/**
1001
 * @brief compress L_FX_NCOB data
1002
 *
1003
 * @param cfg   pointer to the compression configuration structure
1004
 * @param stream_len  already used length of the bitstream in bits
1005
 *
1006
 * @returns the bit length of the bitstream on success or an error code if it
1007
 *  fails (which can be tested with cmp_is_error())
1008
 */
1009
1010
static uint32_t compress_l_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct l_fx_ncob *data_buf = cfg->src;
	const struct l_fx_ncob *model_buf = cfg->model_buf;
	struct l_fx_ncob *up_model_buf = NULL;
	const struct l_fx_ncob *next_model_p;
	struct l_fx_ncob model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_ncob,
			      setup_fx_var, setup_cob_var;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.l_fx, cfg);
	configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
				cfg->round, MAX_USED_BITS.l_ncob, cfg);
	/* we use the cmp_par_fx_cob_variance parameter for fx and cob variance data */
	configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
	configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * The loop is left on the first encoding error. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
					  stream_len, &setup_fx_var);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
					  stream_len, &setup_cob_var);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
					  stream_len, &setup_cob_var);
		if (cmp_is_error(stream_len))
			break;

		/* store the updated model for the next compression cycle;
		 * exp_flags uses the 32-bit update variant (wider field) */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
				cfg->model_value, setup_fx_var.lossy_par);
			up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
				cfg->model_value, setup_cob_var.lossy_par);
			up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
				cfg->model_value, setup_cob_var.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
1097
1098
1099
/**
1100
 * @brief compress L_FX_EFX_NCOB_ECOB data
1101
 *
1102
 * @param cfg   pointer to the compression configuration structure
1103
 * @param stream_len  already used length of the bitstream in bits
1104
 *
1105
 * @returns the bit length of the bitstream on success or an error code if it
1106
 *  fails (which can be tested with cmp_is_error())
1107
 */
1108
1109
static uint32_t compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct l_fx_efx_ncob_ecob *data_buf = cfg->src;
	const struct l_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
	struct l_fx_efx_ncob_ecob *up_model_buf = NULL;
	const struct l_fx_efx_ncob_ecob *next_model_p;
	struct l_fx_efx_ncob_ecob model;
	struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
			      setup_ecob, setup_fx_var, setup_cob_var;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	/* setup_ncob serves ncob_x/ncob_y and setup_ecob serves ecob_x/ecob_y */
	configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
				cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
	configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
				cfg->round, MAX_USED_BITS.l_fx, cfg);
	configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
				cfg->round, MAX_USED_BITS.l_ncob, cfg);
	configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
				cfg->round, MAX_USED_BITS.l_efx, cfg);
	configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
				cfg->round, MAX_USED_BITS.l_ecob, cfg);
	/* we use compression parameters for both variance data fields */
	configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
	configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
				cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * The loop is left on the first encoding error. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
					  stream_len, &setup_exp_flag);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
					  &setup_fx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
					  stream_len, &setup_ncob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].efx, model.efx,
					  stream_len, &setup_efx);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
					  stream_len, &setup_ecob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
					  stream_len, &setup_ecob);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
					  stream_len, &setup_fx_var);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
					  stream_len, &setup_cob_var);
		if (cmp_is_error(stream_len))
			break;
		stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
					  stream_len, &setup_cob_var);
		if (cmp_is_error(stream_len))
			break;

		/* store the updated model for the next compression cycle;
		 * exp_flags uses the 32-bit update variant (wider field) */
		if (up_model_buf) {
			up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
				cfg->model_value, setup_exp_flag.lossy_par);
			up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
				cfg->model_value, setup_fx.lossy_par);
			up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
				cfg->model_value, setup_ncob.lossy_par);
			up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
				cfg->model_value, setup_efx.lossy_par);
			up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
				cfg->model_value, setup_ecob.lossy_par);
			up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
				cfg->model_value, setup_ecob.lossy_par);
			up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
				cfg->model_value, setup_fx_var.lossy_par);
			up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
				cfg->model_value, setup_cob_var.lossy_par);
			up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
				cfg->model_value, setup_cob_var.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
1218
1219
1220
/**
1221
 * @brief compress offset data from the normal and fast cameras
1222
 *
1223
 * @param cfg   pointer to the compression configuration structure
1224
 * @param stream_len  already used length of the bitstream in bits
1225
 *
1226
 * @returns the bit length of the bitstream on success or an error code if it
1227
 *  fails (which can be tested with cmp_is_error())
1228
 */
1229
1230
static uint32_t compress_offset(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct offset *data_buf = cfg->src;
	const struct offset *model_buf = cfg->model_buf;
	struct offset *up_model_buf = NULL;
	const struct offset *next_model_p;
	struct offset model;
	struct encoder_setup setup_mean, setup_var;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	{
		unsigned int mean_bits_used, variance_bits_used;

		/* the fast and normal cameras have separate max-used-bits limits */
		if (cfg->data_type == DATA_TYPE_F_CAM_OFFSET) {
			mean_bits_used = MAX_USED_BITS.fc_offset_mean;
			variance_bits_used = MAX_USED_BITS.fc_offset_variance;
		} else { /* DATA_TYPE_OFFSET */
			mean_bits_used = MAX_USED_BITS.nc_offset_mean;
			variance_bits_used = MAX_USED_BITS.nc_offset_variance;
		}

		configure_encoder_setup(&setup_mean, cfg->cmp_par_offset_mean, cfg->spill_offset_mean,
					cfg->round, mean_bits_used, cfg);
		configure_encoder_setup(&setup_var, cfg->cmp_par_offset_variance, cfg->spill_offset_variance,
					cfg->round, variance_bits_used, cfg);
	}

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * Encoding errors are returned immediately as the error code. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].mean, model.mean,
					  stream_len, &setup_mean);
		if (cmp_is_error(stream_len))
			return stream_len;
		stream_len = encode_value(data_buf[i].variance, model.variance,
					  stream_len, &setup_var);
		if (cmp_is_error(stream_len))
			return stream_len;

		/* store the updated model for the next compression cycle */
		if (up_model_buf) {
			up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
				cfg->model_value, setup_mean.lossy_par);
			up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
				cfg->model_value, setup_var.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
1291
1292
1293
/**
1294
 * @brief compress background data from the normal and fast cameras
1295
 *
1296
 * @param cfg   pointer to the compression configuration structure
1297
 * @param stream_len  already used length of the bitstream in bits
1298
 *
1299
 * @returns the bit length of the bitstream on success or an error code if it
1300
 *  fails (which can be tested with cmp_is_error())
1301
 */
1302
1303
static uint32_t compress_background(const struct cmp_cfg *cfg, uint32_t stream_len)
1304
0
{
1305
0
  size_t i;
1306
1307
0
  const struct background *data_buf = cfg->src;
1308
0
  const struct background *model_buf = cfg->model_buf;
1309
0
  struct background *up_model_buf = NULL;
1310
0
  const struct background *next_model_p;
1311
0
  struct background model;
1312
0
  struct encoder_setup setup_mean, setup_var, setup_pix;
1313
1314
0
  if (model_mode_is_used(cfg->cmp_mode)) {
1315
0
    model = model_buf[0];
1316
0
    next_model_p = &model_buf[1];
1317
0
    up_model_buf = cfg->updated_model_buf;
1318
0
  } else {
1319
0
    memset(&model, 0, sizeof(model));
1320
0
    next_model_p = data_buf;
1321
0
  }
1322
1323
0
  {
1324
0
    unsigned int mean_used_bits, varinace_used_bits, pixels_error_used_bits;
1325
1326
0
    if (cfg->data_type == DATA_TYPE_F_CAM_BACKGROUND) {
1327
0
      mean_used_bits = MAX_USED_BITS.fc_background_mean;
1328
0
      varinace_used_bits = MAX_USED_BITS.fc_background_variance;
1329
0
      pixels_error_used_bits = MAX_USED_BITS.fc_background_outlier_pixels;
1330
0
    } else { /* DATA_TYPE_BACKGROUND */
1331
0
      mean_used_bits = MAX_USED_BITS.nc_background_mean;
1332
0
      varinace_used_bits = MAX_USED_BITS.nc_background_variance;
1333
0
      pixels_error_used_bits = MAX_USED_BITS.nc_background_outlier_pixels;
1334
0
    }
1335
0
    configure_encoder_setup(&setup_mean, cfg->cmp_par_background_mean, cfg->spill_background_mean,
1336
0
          cfg->round, mean_used_bits, cfg);
1337
0
    configure_encoder_setup(&setup_var, cfg->cmp_par_background_variance, cfg->spill_background_variance,
1338
0
          cfg->round, varinace_used_bits, cfg);
1339
0
    configure_encoder_setup(&setup_pix, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error,
1340
0
          cfg->round, pixels_error_used_bits, cfg);
1341
0
  }
1342
1343
0
  for (i = 0;; i++) {
1344
0
    stream_len = encode_value(data_buf[i].mean, model.mean,
1345
0
            stream_len, &setup_mean);
1346
0
    if (cmp_is_error(stream_len))
1347
0
      return stream_len;
1348
0
    stream_len = encode_value(data_buf[i].variance, model.variance,
1349
0
            stream_len, &setup_var);
1350
0
    if (cmp_is_error(stream_len))
1351
0
      return stream_len;
1352
0
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1353
0
            stream_len, &setup_pix);
1354
0
    if (cmp_is_error(stream_len))
1355
0
      return stream_len;
1356
1357
0
    if (up_model_buf) {
1358
0
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1359
0
        cfg->model_value, setup_mean.lossy_par);
1360
0
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1361
0
        cfg->model_value, setup_var.lossy_par);
1362
0
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1363
0
        cfg->model_value, setup_pix.lossy_par);
1364
0
    }
1365
1366
0
    if (i >= cfg->samples-1)
1367
0
      break;
1368
1369
0
    model = next_model_p[i];
1370
0
  }
1371
0
  return stream_len;
1372
0
}
1373
1374
1375
/**
1376
 * @brief compress smearing data from the normal cameras
1377
 *
1378
 * @param cfg   pointer to the compression configuration structure
1379
 * @param stream_len  already used length of the bitstream in bits
1380
 *
1381
 * @returns the bit length of the bitstream on success or an error code if it
1382
 *  fails (which can be tested with cmp_is_error())
1383
 */
1384
1385
static uint32_t compress_smearing(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	size_t i;

	const struct smearing *data_buf = cfg->src;
	const struct smearing *model_buf = cfg->model_buf;
	struct smearing *up_model_buf = NULL;
	const struct smearing *next_model_p;
	struct smearing model;
	struct encoder_setup setup_mean, setup_var_mean, setup_pix;

	/* model mode: seed the model with the first model sample and write
	 * updates to cfg->updated_model_buf; otherwise the previous data
	 * sample is the model (zero model for sample 0) */
	if (model_mode_is_used(cfg->cmp_mode)) {
		model = model_buf[0];
		next_model_p = &model_buf[1];
		up_model_buf = cfg->updated_model_buf;
	} else {
		memset(&model, 0, sizeof(model));
		next_model_p = data_buf;
	}

	configure_encoder_setup(&setup_mean, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean,
				cfg->round, MAX_USED_BITS.smearing_mean, cfg);
	configure_encoder_setup(&setup_var_mean, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance,
				cfg->round, MAX_USED_BITS.smearing_variance_mean, cfg);
	configure_encoder_setup(&setup_pix, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error,
				cfg->round, MAX_USED_BITS.smearing_outlier_pixels, cfg);

	/* loop body runs at least once; caller must ensure cfg->samples > 0.
	 * Encoding errors are returned immediately as the error code. */
	for (i = 0;; i++) {
		stream_len = encode_value(data_buf[i].mean, model.mean,
					  stream_len, &setup_mean);
		if (cmp_is_error(stream_len))
			return stream_len;
		stream_len = encode_value(data_buf[i].variance_mean, model.variance_mean,
					  stream_len, &setup_var_mean);
		if (cmp_is_error(stream_len))
			return stream_len;
		stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
					  stream_len, &setup_pix);
		if (cmp_is_error(stream_len))
			return stream_len;

		/* store the updated model for the next compression cycle */
		if (up_model_buf) {
			up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
				cfg->model_value, setup_mean.lossy_par);
			up_model_buf[i].variance_mean = cmp_up_model(data_buf[i].variance_mean, model.variance_mean,
				cfg->model_value, setup_var_mean.lossy_par);
			up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
				cfg->model_value, setup_pix.lossy_par);
		}

		if (i >= cfg->samples-1)
			break;

		model = next_model_p[i];
	}
	return stream_len;
}
1442
1443
1444
/**
1445
 * @brief check if two buffers are overlapping
1446
 * @see https://stackoverflow.com/a/325964
1447
 *
1448
 * @param buf_a   start address of the 1st buffer (can be NULL)
1449
 * @param size_a  byte size of the 1st buffer
1450
 * @param buf_b   start address of the 2nd buffer (can be NULL)
1451
 * @param size_b  byte size of the 2nd buffer
1452
 *
1453
 * @returns 0 if buffers are not overlapping, otherwise buffers are
1454
 *  overlapping
1455
 */
1456
1457
static int buffer_overlaps(const void *buf_a, size_t size_a,
			   const void *buf_b, size_t size_b)
{
	const char *a, *b;

	/* a missing buffer cannot overlap anything */
	if (!buf_a || !buf_b)
		return 0;

	a = (const char *)buf_a;
	b = (const char *)buf_b;

	/* the half-open ranges [a, a+size_a) and [b, b+size_b)
	 * intersect iff each one starts before the other ends */
	return a < b + size_b && b < a + size_a;
}
1472
1473
1474
/**
1475
 * @brief fill the last part of the bitstream with zeros
1476
 *
1477
 * @param cfg   pointer to the compression configuration structure
1478
 * @param cmp_size  length of the bitstream in bits
1479
 *
1480
 * @returns the bit length of the bitstream on success or an error code if it
1481
 *  fails (which can be tested with cmp_is_error())
1482
 */
1483
1484
static uint32_t pad_bitstream(const struct cmp_cfg *cfg, uint32_t cmp_size)
1485
3.31k
{
1486
3.31k
  unsigned int output_buf_len_bits, n_pad_bits;
1487
1488
3.31k
  if (!cfg->dst)
1489
2.89k
    return cmp_size;
1490
1491
  /* no padding in RAW mode; ALWAYS BIG-ENDIAN */
1492
424
  if (cfg->cmp_mode == CMP_MODE_RAW)
1493
316
    return cmp_size;
1494
1495
  /* maximum length of the bitstream in bits */
1496
108
  output_buf_len_bits = cmp_stream_size_to_bits(cfg->stream_size);
1497
1498
108
  n_pad_bits = 32 - (cmp_size & 0x1FU);
1499
108
  if (n_pad_bits < 32) {
1500
101
    FORWARD_IF_ERROR(put_n_bits32(0, n_pad_bits, cmp_size,
1501
101
         cfg->dst, output_buf_len_bits), "");
1502
101
  }
1503
1504
108
  return cmp_size;
1505
108
}
1506
1507
1508
/**
1509
 * @brief internal data compression function
1510
 * This function can compress all types of collection data (one at a time).
1511
 * This function does not take the header of a collection into account.
1512
 *
1513
 * @param cfg   pointer to the compression configuration structure
1514
 * @param stream_len  already used length of the bitstream in bits
1515
 *
1516
 * @note the validity of the cfg structure is not checked
1517
 *
1518
 * @returns the bit length of the bitstream on success or an error code if it
1519
 *  fails (which can be tested with cmp_is_error())
1520
 */
1521
1522
static uint32_t compress_data_internal(const struct cmp_cfg *cfg, uint32_t stream_len)
{
	uint32_t bitsize = 0;

	/* an already failed stream length is forwarded unchanged */
	FORWARD_IF_ERROR(stream_len, "");
	RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
	/* the bitstream may only be appended on a byte boundary */
	RETURN_ERROR_IF(stream_len & 0x7, GENERIC, "The stream_len parameter must be a multiple of 8.");

	if (cfg->samples == 0) /* nothing to compress we are done */
		return stream_len;

	if (raw_mode_is_used(cfg->cmp_mode)) {
		/* RAW mode: copy the samples unchanged (only endianness converted) */
		uint32_t raw_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type);

		if (cfg->dst) {
			uint32_t offset_bytes = stream_len >> 3;
			uint8_t *p = (uint8_t *)cfg->dst + offset_bytes;
			uint32_t new_stream_size = offset_bytes + raw_size;

			RETURN_ERROR_IF(new_stream_size > cfg->stream_size, SMALL_BUFFER, "");
			memcpy(p, cfg->src, raw_size);
			/* raw data are stored big-endian in the bitstream */
			RETURN_ERROR_IF(cpu_to_be_data_type(p, raw_size, cfg->data_type),
					INT_DATA_TYPE_UNSUPPORTED, "");
		}
		bitsize += stream_len + raw_size * 8; /* convert to bits */
	} else {
		/* dispatch to the type-specific compression routine */
		switch (cfg->data_type) {
		case DATA_TYPE_IMAGETTE:
		case DATA_TYPE_IMAGETTE_ADAPTIVE:
		case DATA_TYPE_SAT_IMAGETTE:
		case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
		case DATA_TYPE_F_CAM_IMAGETTE:
		case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
			bitsize = compress_imagette(cfg, stream_len);
			break;

		case DATA_TYPE_S_FX:
			bitsize = compress_s_fx(cfg, stream_len);
			break;
		case DATA_TYPE_S_FX_EFX:
			bitsize = compress_s_fx_efx(cfg, stream_len);
			break;
		case DATA_TYPE_S_FX_NCOB:
			bitsize = compress_s_fx_ncob(cfg, stream_len);
			break;
		case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
			bitsize = compress_s_fx_efx_ncob_ecob(cfg, stream_len);
			break;


		case DATA_TYPE_L_FX:
			bitsize = compress_l_fx(cfg, stream_len);
			break;
		case DATA_TYPE_L_FX_EFX:
			bitsize = compress_l_fx_efx(cfg, stream_len);
			break;
		case DATA_TYPE_L_FX_NCOB:
			bitsize = compress_l_fx_ncob(cfg, stream_len);
			break;
		case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
			bitsize = compress_l_fx_efx_ncob_ecob(cfg, stream_len);
			break;

		case DATA_TYPE_OFFSET:
		case DATA_TYPE_F_CAM_OFFSET:
			bitsize = compress_offset(cfg, stream_len);
			break;
		case DATA_TYPE_BACKGROUND:
		case DATA_TYPE_F_CAM_BACKGROUND:
			bitsize = compress_background(cfg, stream_len);
			break;
		case DATA_TYPE_SMEARING:
			bitsize = compress_smearing(cfg, stream_len);
			break;

		/* the F_FX* types and chunk/unknown types have no
		 * per-collection compression routine here
		 */
		case DATA_TYPE_F_FX:
		case DATA_TYPE_F_FX_EFX:
		case DATA_TYPE_F_FX_NCOB:
		case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
		case DATA_TYPE_CHUNK:
		case DATA_TYPE_UNKNOWN:
		default:
			RETURN_ERROR(INT_DATA_TYPE_UNSUPPORTED, "");
		}
	}

	if (cmp_is_error(bitsize))
		return bitsize;

	/* pad the stream up to the next 32-bit boundary (no-op in RAW mode) */
	bitsize = pad_bitstream(cfg, bitsize);

	return bitsize;
}
1615
1616
1617
/**
1618
 * @brief check if the ICU buffer parameters are invalid
1619
 *
1620
 * @param cfg pointer to the compressor configuration to check
1621
 *
1622
 * @returns 0 if the buffer parameters are valid, otherwise invalid
1623
 */
1624
1625
static uint32_t check_compression_buffers(const struct cmp_cfg *cfg)
{
	size_t data_size;

	RETURN_ERROR_IF(cfg == NULL, GENERIC, "");

	RETURN_ERROR_IF(cfg->src == NULL, CHUNK_NULL, "");

	/* byte size of the data to compress */
	data_size = size_of_a_sample(cfg->data_type) * cfg->samples;

	if (cfg->samples == 0)
		debug_print("Warning: The samples parameter is 0. No data are compressed. This behavior may not be intended.");

	/* source and destination must not share memory */
	RETURN_ERROR_IF(buffer_overlaps(cfg->dst, cfg->stream_size,
					cfg->src, data_size), PAR_BUFFERS,
		"The compressed data buffer and the data to compress buffer are overlapping.");

	if (model_mode_is_used(cfg->cmp_mode)) {
		/* model compression needs a model buffer that is disjoint
		 * from both the source and the destination
		 */
		RETURN_ERROR_IF(cfg->model_buf == NULL, PAR_NO_MODEL, "");

		RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
						cfg->src, data_size), PAR_BUFFERS,
				"The model buffer and the data to compress buffer are overlapping.");
		RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
						cfg->dst, cfg->stream_size), PAR_BUFFERS,
				"The model buffer and the compressed data buffer are overlapping.");

		/* the (optional) updated model buffer must also be disjoint
		 * from the source and the destination; overlap with the
		 * model buffer itself is allowed (in-place model update)
		 */
		RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
						cfg->src, data_size), PAR_BUFFERS,
				"The updated model buffer and the data to compress buffer are overlapping.");
		RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
						cfg->dst, cfg->stream_size), PAR_BUFFERS,
				"The updated model buffer and the compressed data buffer are overlapping.");
	}

	return CMP_ERROR(NO_ERROR);
}
1662
1663
1664
/**
1665
 * @brief checks if the ICU compression configuration is valid
1666
 *
1667
 * @param cfg pointer to the cmp_cfg structure to be validated
1668
 *
1669
 * @returns an error code if any of the configuration parameters are invalid,
1670
 *  otherwise returns CMP_ERROR_NO_ERROR on valid configuration
1671
 */
1672
1673
static uint32_t cmp_cfg_icu_is_invalid_error_code(const struct cmp_cfg *cfg)
{

	/* generic parameters (mode, model value, ...) first */
	RETURN_ERROR_IF(cmp_cfg_gen_par_is_invalid(cfg), PAR_GENERIC, "");

	/* then the parameters specific to the data type family */
	if (cmp_imagette_data_type_is_used(cfg->data_type))
		RETURN_ERROR_IF(cmp_cfg_imagette_is_invalid(cfg), PAR_SPECIFIC, "");
	else if (cmp_fx_cob_data_type_is_used(cfg->data_type))
		RETURN_ERROR_IF(cmp_cfg_fx_cob_is_invalid(cfg), PAR_SPECIFIC, "");
	else
		RETURN_ERROR_IF(cmp_cfg_aux_is_invalid(cfg), PAR_SPECIFIC, "");

	/* finally the buffer sanity checks (NULL/overlap) */
	FORWARD_IF_ERROR(check_compression_buffers(cfg), "");

	return CMP_ERROR(NO_ERROR);
}
1689
1690
1691
/**
1692
 * @brief calculate the optimal spill threshold value for zero escape mechanism
1693
 *
1694
 * @param golomb_par  Golomb parameter
1695
 * @param max_data_bits maximum number of used data bits
1696
 *
1697
 * @returns the highest optimal spill threshold value for a given Golomb
1698
 *  parameter, when the zero escape mechanism is used or 0 if the
1699
 *  Golomb parameter is not valid
1700
 */
1701
1702
static uint32_t cmp_best_zero_spill(uint32_t golomb_par, uint32_t max_data_bits)
1703
403
{
1704
403
  uint32_t const max_spill = cmp_icu_max_spill(golomb_par);
1705
403
  uint32_t cutoff;
1706
403
  uint32_t spill;
1707
1708
403
  if (golomb_par < MIN_NON_IMA_GOLOMB_PAR)
1709
230
    return 0;
1710
173
  if (golomb_par > MAX_NON_IMA_GOLOMB_PAR)
1711
34
    return 0;
1712
1713
139
  cutoff = (0x2U << ilog_2(golomb_par)) - golomb_par;
1714
139
  spill = max_data_bits * golomb_par + cutoff;
1715
139
  if (spill > max_spill)
1716
99
    spill = max_spill;
1717
1718
139
  return spill;
1719
173
}
1720
1721
1722
/**
1723
 * @brief estimate a "good" spillover threshold parameter
1724
 *
1725
 * @param golomb_par  Golomb parameter
1726
 * @param cmp_mode  compression mode
1727
 * @param max_data_bits maximum number of used data bits
1728
 *
1729
 * @returns a spillover threshold parameter or 0 if the Golomb parameter is not
1730
 *  valid
1731
 */
1732
1733
static uint32_t cmp_get_spill(uint32_t golomb_par, enum cmp_mode cmp_mode,
1734
            uint32_t max_data_bits)
1735
1.27k
{
1736
1.27k
  if (zero_escape_mech_is_used(cmp_mode))
1737
403
    return cmp_best_zero_spill(golomb_par, max_data_bits);
1738
1739
873
  return cmp_icu_max_spill(golomb_par);
1740
1.27k
}
1741
1742
1743
/**
1744
 * @brief set the compressed collection size field
1745
 *
1746
 * @param cmp_col_size_field  pointer to the compressed collection size field
1747
 * @param cmp_col_size    size of the compressed collection (not including
1748
 *        the compressed collection header size and the
1749
 *        size of the compressed collection size field
1750
 *        itself)
1751
 *
1752
 * @returns error code
1753
 */
1754
1755
static uint32_t set_cmp_col_size(uint8_t *cmp_col_size_field, uint32_t cmp_col_size)
1756
289
{
1757
289
  uint16_t const v = cpu_to_be16((uint16_t)cmp_col_size);
1758
1759
289
  RETURN_ERROR_IF(cmp_col_size > UINT16_MAX, INT_CMP_COL_TOO_LARGE,
1760
289
      "%"PRIu32" is bigger than the maximum allowed compression collection size", cmp_col_size_field);
1761
1762
289
  memcpy(cmp_col_size_field, &v, CMP_COLLECTION_FILD_SIZE);
1763
1764
289
  return 0;
1765
289
}
1766
1767
1768
/**
1769
 * @brief compresses a collection (with a collection header followed by data)
1770
 *
1771
 * @param col   pointer to a collection header
1772
 * @param model   pointer to the model to be used for compression, or NULL
1773
 *      if not applicable
1774
 * @param updated_model pointer to the buffer where the updated model will be
1775
 *      stored, or NULL if not applicable
1776
 * @param dst   pointer to the buffer where the compressed data will be
1777
 *      stored, or NULL to only get the compressed data size
1778
 * @param dst_capacity  the size of the dst buffer in bytes
1779
 * @param cfg   pointer to a compression configuration
1780
 * @param dst_size  the already used size of the dst buffer in bytes
1781
 *
1782
 * @returns the size of the compressed data in bytes (new dst_size) on
1783
 *  success or an error code if it fails (which can be tested with
1784
 *  cmp_is_error())
1785
 */
1786
static uint32_t cmp_collection(const uint8_t *col,
			       const uint8_t *model, uint8_t *updated_model,
			       uint32_t *dst, uint32_t dst_capacity,
			       struct cmp_cfg *cfg, uint32_t dst_size)
{
	uint32_t const dst_size_begin = dst_size;
	uint32_t dst_size_bits;
	const struct collection_hdr *col_hdr = (const struct collection_hdr *)col;
	uint16_t const col_data_length = cmp_col_get_data_length(col_hdr);
	uint16_t sample_size;

	/* sanity check of the collection header */
	cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr));
	sample_size = (uint16_t)size_of_a_sample(cfg->data_type);
	RETURN_ERROR_IF(col_data_length % sample_size, COL_SIZE_INCONSISTENT,
			"col_data_length: %u %% sample_size: %u != 0", col_data_length, sample_size);
	cfg->samples = col_data_length/sample_size;

	/* prepare the different buffers */
	cfg->src = col + COLLECTION_HDR_SIZE;
	if (model)
		cfg->model_buf = model + COLLECTION_HDR_SIZE;
	if (updated_model)
		cfg->updated_model_buf = updated_model + COLLECTION_HDR_SIZE;
	cfg->dst = dst;
	cfg->stream_size = dst_capacity;
	FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(cfg), "");

	if (cfg->cmp_mode != CMP_MODE_RAW) {
		/* here we reserve space for the compressed data size field */
		dst_size += CMP_COLLECTION_FILD_SIZE;
	}

	/* we do not compress the collection header, we simply copy the header
	 * into the compressed data
	 */
	if (dst) {
		RETURN_ERROR_IF(dst_size + COLLECTION_HDR_SIZE > dst_capacity,
				SMALL_BUFFER, "");
		memcpy((uint8_t *)dst + dst_size, col, COLLECTION_HDR_SIZE);
	}
	dst_size += COLLECTION_HDR_SIZE;
	if (model_mode_is_used(cfg->cmp_mode) && updated_model)
		memcpy(updated_model, col, COLLECTION_HDR_SIZE);

	/* is enough capacity in the dst buffer to store the data uncompressed */
	if ((dst == NULL || dst_capacity >= dst_size + col_data_length) &&
	    cfg->cmp_mode != CMP_MODE_RAW) {
		/* we set the compressed buffer size to the data size -1 to provoke
		 * a CMP_ERROR_SMALL_BUFFER error if the data are not compressible
		 */
		cfg->stream_size = dst_size + col_data_length - 1;
		dst_size_bits = compress_data_internal(cfg, dst_size << 3);

		if (cmp_get_error_code(dst_size_bits) == CMP_ERROR_SMALL_BUFFER ||
		    (!dst && dst_size_bits > cmp_stream_size_to_bits(cfg->stream_size))) { /* if dst == NULL compress_data_internal will not return a CMP_ERROR_SMALL_BUFFER */
			/* can not compress the data with the given parameters;
			 * put them uncompressed (raw) into the dst buffer */
			enum cmp_mode cmp_mode_cpy = cfg->cmp_mode;

			cfg->stream_size = dst_size + col_data_length;
			cfg->cmp_mode = CMP_MODE_RAW;
			dst_size_bits = compress_data_internal(cfg, dst_size << 3);
			cfg->cmp_mode = cmp_mode_cpy;
			/* updated model is in this case a copy of the data to compress */
			if (model_mode_is_used(cfg->cmp_mode) && cfg->updated_model_buf)
				memcpy(cfg->updated_model_buf, cfg->src, col_data_length);
		}
	} else {
		/* RAW mode or not enough room for a raw fallback: compress
		 * against the full destination capacity
		 */
		cfg->stream_size = dst_capacity;
		dst_size_bits = compress_data_internal(cfg, dst_size << 3);
	}
	FORWARD_IF_ERROR(dst_size_bits, "compression failed");

	dst_size = cmp_bit_to_byte(dst_size_bits);
	if (cfg->cmp_mode != CMP_MODE_RAW && dst) {
		/* fill in the size field that was reserved above */
		uint8_t *cmp_col_size_field = (uint8_t *)dst+dst_size_begin;
		uint32_t cmp_col_size = dst_size - dst_size_begin -
			COLLECTION_HDR_SIZE - CMP_COLLECTION_FILD_SIZE;

		FORWARD_IF_ERROR(set_cmp_col_size(cmp_col_size_field, cmp_col_size), "");
	}

	return dst_size;
}
1871
1872
1873
/**
1874
 * @brief builds a compressed entity header for a compressed chunk
1875
 *
1876
 * @param entity    start address of the compression entity header
1877
 *        (can be NULL if you only want the entity header
1878
 *        size)
1879
 * @param chunk_size    the original size of the chunk in bytes
1880
 * @param cfg     pointer to the compression configuration used to
1881
 *        compress the chunk
1882
 * @param start_timestamp the start timestamp of the chunk compression
1883
 * @param cmp_ent_size_byte the size of the compression entity (entity
1884
 *        header plus compressed data)
1885
 *
1886
 * @return the size of the compressed entity header in bytes or an error code
1887
 *  if it fails (which can be tested with cmp_is_error())
1888
 */
1889
1890
static uint32_t cmp_ent_build_chunk_header(uint32_t *entity, uint32_t chunk_size,
					   const struct cmp_cfg *cfg, uint64_t start_timestamp,
					   uint32_t cmp_ent_size_byte)
{
	if (entity) { /* setup the compressed entity header */
		struct cmp_entity *ent = (struct cmp_entity *)entity;
		int err = 0;

		/* accumulate setter failures in err and report them once */
		err |= cmp_ent_set_version_id(ent, version_identifier); /* set by compress_chunk_init */
		err |= cmp_ent_set_size(ent, cmp_ent_size_byte);
		err |= cmp_ent_set_original_size(ent, chunk_size);
		err |= cmp_ent_set_data_type(ent, DATA_TYPE_CHUNK, cfg->cmp_mode == CMP_MODE_RAW);
		err |= cmp_ent_set_cmp_mode(ent, cfg->cmp_mode);
		err |= cmp_ent_set_model_value(ent, cfg->model_value);
		/* model id/counter are set by the user with the compress_chunk_set_model_id_and_counter() */
		err |= cmp_ent_set_model_id(ent, 0);
		err |= cmp_ent_set_model_counter(ent, 0);
		err |= cmp_ent_set_reserved(ent, 0);
		err |= cmp_ent_set_lossy_cmp_par(ent, cfg->round);
		if (cfg->cmp_mode != CMP_MODE_RAW) {
			/* the non-imagette header additionally carries all six
			 * compression/spillover parameter pairs
			 */
			err |= cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1);
			err |= cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1);
			err |= cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2);
			err |= cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2);
			err |= cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3);
			err |= cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3);
			err |= cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4);
			err |= cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4);
			err |= cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5);
			err |= cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5);
			err |= cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6);
			err |= cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6);
		}
		RETURN_ERROR_IF(err, ENTITY_HEADER, "");
		/* timestamps are set last; failures get their own error code */
		RETURN_ERROR_IF(cmp_ent_set_start_timestamp(ent, start_timestamp),
				ENTITY_TIMESTAMP, "");
		RETURN_ERROR_IF(cmp_ent_set_end_timestamp(ent, get_timestamp()),
				ENTITY_TIMESTAMP, "");
	}

	/* RAW entities carry only the generic header; all others need the
	 * larger non-imagette header with the parameter fields
	 */
	if (cfg->cmp_mode == CMP_MODE_RAW)
		return GENERIC_HEADER_SIZE;
	else
		return NON_IMAGETTE_HEADER_SIZE;
}
1935
1936
1937
/**
1938
 * @brief Set the compression configuration from the compression parameters
1939
 *  based on the chunk type of the collection
1940
 *
1941
 * @param[in] col pointer to a collection header
1942
 * @param[in] par pointer to a compression parameters struct
1943
 * @param[out] cfg  pointer to a compression configuration
1944
 *
1945
 * @returns the chunk type of the collection
1946
 */
1947
1948
static enum chunk_type init_cmp_cfg_from_cmp_par(const struct collection_hdr *col,
						 const struct cmp_par *par,
						 struct cmp_cfg *cfg)
{
	enum chunk_type chunk_type = cmp_col_get_chunk_type(col);

	/* start from a zeroed configuration */
	memset(cfg, 0, sizeof(struct cmp_cfg));

	/* the ranges of the parameters are checked in cmp_cfg_icu_is_invalid_error_code() */
	cfg->cmp_mode = par->cmp_mode;
	cfg->model_value = par->model_value;
	if (par->lossy_par)
		debug_print("Warning: lossy compression is not supported for chunk compression, lossy_par will be ignored.");
	cfg->round = 0;

	/* select the compression/spillover parameters matching the chunk
	 * type; spillover thresholds are derived from the Golomb parameters
	 * via cmp_get_spill()
	 */
	switch (chunk_type) {
	case CHUNK_TYPE_NCAM_IMAGETTE:
		cfg->cmp_par_imagette = par->nc_imagette;
		cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
						    MAX_USED_BITS.nc_imagette);
		break;
	case CHUNK_TYPE_SAT_IMAGETTE:
		cfg->cmp_par_imagette = par->saturated_imagette;
		cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
						    MAX_USED_BITS.saturated_imagette);
		break;
	case CHUNK_TYPE_SHORT_CADENCE:
		cfg->cmp_par_exp_flags = par->s_exp_flags;
		cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
						     MAX_USED_BITS.s_exp_flags);
		cfg->cmp_par_fx = par->s_fx;
		cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
					      MAX_USED_BITS.s_fx);
		cfg->cmp_par_ncob = par->s_ncob;
		cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
						MAX_USED_BITS.s_ncob);
		cfg->cmp_par_efx = par->s_efx;
		cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
					       MAX_USED_BITS.s_efx);
		cfg->cmp_par_ecob = par->s_ecob;
		cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
						MAX_USED_BITS.s_ecob);
		break;
	case CHUNK_TYPE_LONG_CADENCE:
		cfg->cmp_par_exp_flags = par->l_exp_flags;
		cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
						     MAX_USED_BITS.l_exp_flags);
		cfg->cmp_par_fx = par->l_fx;
		cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
					      MAX_USED_BITS.l_fx);
		cfg->cmp_par_ncob = par->l_ncob;
		cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
						MAX_USED_BITS.l_ncob);
		cfg->cmp_par_efx = par->l_efx;
		cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
					       MAX_USED_BITS.l_efx);
		cfg->cmp_par_ecob = par->l_ecob;
		cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
						MAX_USED_BITS.l_ecob);
		cfg->cmp_par_fx_cob_variance = par->l_fx_cob_variance;
		cfg->spill_fx_cob_variance = cmp_get_spill(cfg->cmp_par_fx_cob_variance,
							   cfg->cmp_mode, MAX_USED_BITS.l_fx_cob_variance);
		break;
	case CHUNK_TYPE_OFFSET_BACKGROUND:
		cfg->cmp_par_offset_mean = par->nc_offset_mean;
		cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
						       cfg->cmp_mode, MAX_USED_BITS.nc_offset_mean);
		cfg->cmp_par_offset_variance = par->nc_offset_variance;
		cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
							   cfg->cmp_mode, MAX_USED_BITS.nc_offset_variance);
		cfg->cmp_par_background_mean = par->nc_background_mean;
		cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
							   cfg->cmp_mode, MAX_USED_BITS.nc_background_mean);
		cfg->cmp_par_background_variance = par->nc_background_variance;
		cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
							       cfg->cmp_mode, MAX_USED_BITS.nc_background_variance);
		cfg->cmp_par_background_pixels_error = par->nc_background_outlier_pixels;
		cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
								   cfg->cmp_mode, MAX_USED_BITS.nc_background_outlier_pixels);
		break;

	case CHUNK_TYPE_SMEARING:
		cfg->cmp_par_smearing_mean = par->smearing_mean;
		cfg->spill_smearing_mean = cmp_get_spill(cfg->cmp_par_smearing_mean,
							 cfg->cmp_mode, MAX_USED_BITS.smearing_mean);
		cfg->cmp_par_smearing_variance = par->smearing_variance_mean;
		cfg->spill_smearing_variance = cmp_get_spill(cfg->cmp_par_smearing_variance,
							     cfg->cmp_mode, MAX_USED_BITS.smearing_variance_mean);
		cfg->cmp_par_smearing_pixels_error = par->smearing_outlier_pixels;
		cfg->spill_smearing_pixels_error = cmp_get_spill(cfg->cmp_par_smearing_pixels_error,
								 cfg->cmp_mode, MAX_USED_BITS.smearing_outlier_pixels);
		break;

	case CHUNK_TYPE_F_CHAIN:
		cfg->cmp_par_imagette = par->fc_imagette;
		cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette,
						    cfg->cmp_mode, MAX_USED_BITS.fc_imagette);

		cfg->cmp_par_offset_mean = par->fc_offset_mean;
		cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
						       cfg->cmp_mode, MAX_USED_BITS.fc_offset_mean);
		cfg->cmp_par_offset_variance = par->fc_offset_variance;
		cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
							   cfg->cmp_mode, MAX_USED_BITS.fc_offset_variance);

		cfg->cmp_par_background_mean = par->fc_background_mean;
		cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
							   cfg->cmp_mode, MAX_USED_BITS.fc_background_mean);
		cfg->cmp_par_background_variance = par->fc_background_variance;
		cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
							       cfg->cmp_mode, MAX_USED_BITS.fc_background_variance);
		cfg->cmp_par_background_pixels_error = par->fc_background_outlier_pixels;
		cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
								   cfg->cmp_mode, MAX_USED_BITS.fc_background_outlier_pixels);
		break;
	case CHUNK_TYPE_UNKNOWN:
	default: /*
		  * default case never reached because cmp_col_get_chunk_type
		  * returns CHUNK_TYPE_UNKNOWN if the type is unknown
		  */
		chunk_type = CHUNK_TYPE_UNKNOWN;
		break;
	}

	return chunk_type;
}
2074
2075
2076
/**
2077
 * @brief initialise the compress_chunk() function
2078
 *
2079
 * If not initialised the compress_chunk() function sets the timestamps and
2080
 * version_id in the compression entity header to zero
2081
 *
2082
 * @param return_timestamp  pointer to a function returning a current 48-bit
2083
 *        timestamp
2084
 * @param version_id    application software version identifier
2085
 */
2086
2087
void compress_chunk_init(uint64_t (*return_timestamp)(void), uint32_t version_id)
2088
285
{
2089
285
  if (return_timestamp)
2090
285
    get_timestamp = return_timestamp;
2091
2092
285
  version_identifier = version_id;
2093
285
}
2094
2095
2096
/**
2097
 * @brief compress a data chunk consisting of put together data collections
2098
 *
2099
 * @param chunk     pointer to the chunk to be compressed
2100
 * @param chunk_size    byte size of the chunk
2101
 * @param chunk_model   pointer to a model of a chunk; has the same size
2102
 *        as the chunk (can be NULL if no model compression
2103
 *        mode is used)
2104
 * @param updated_chunk_model pointer to store the updated model for the next
2105
 *        model mode compression; has the same size as the
2106
 *        chunk (can be the same as the model_of_data
2107
 *        buffer for in-place update or NULL if updated
2108
 *        model is not needed)
2109
 * @param dst     destination pointer to the compressed data
2110
 *        buffer; has to be 4-byte aligned; can be NULL to
2111
 *        only get the compressed data size
2112
 * @param dst_capacity    capacity of the dst buffer; it's recommended to
2113
 *        provide a dst_capacity >=
2114
 *        compress_chunk_cmp_size_bound(chunk, chunk_size)
2115
 *        as it eliminates one potential failure scenario:
2116
 *        not enough space in the dst buffer to write the
2117
 *        compressed data; size is internally rounded down
2118
 *        to a multiple of 4
2119
 * @param cmp_par   pointer to a compression parameters struct
2120
 * @returns the byte size of the compressed data or an error code if it
2121
 *  fails (which can be tested with cmp_is_error())
2122
 */
2123
2124
uint32_t compress_chunk(const void *chunk, uint32_t chunk_size,
2125
      const void *chunk_model, void *updated_chunk_model,
2126
      uint32_t *dst, uint32_t dst_capacity,
2127
      const struct cmp_par *cmp_par)
2128
285
{
2129
285
  uint64_t const start_timestamp = get_timestamp();
2130
285
  const struct collection_hdr *col = (const struct collection_hdr *)chunk;
2131
285
  enum chunk_type chunk_type;
2132
285
  struct cmp_cfg cfg;
2133
285
  uint32_t cmp_size_byte; /* size of the compressed data in bytes */
2134
285
  size_t read_bytes;
2135
2136
285
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2137
285
  RETURN_ERROR_IF(cmp_par == NULL, PAR_NULL, "");
2138
285
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT,
2139
285
      "chunk_size: %"PRIu32"", chunk_size);
2140
285
  RETURN_ERROR_IF(chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE, CHUNK_TOO_LARGE,
2141
285
      "chunk_size: %"PRIu32"", chunk_size);
2142
2143
285
  chunk_type = init_cmp_cfg_from_cmp_par(col, cmp_par, &cfg);
2144
285
  RETURN_ERROR_IF(chunk_type == CHUNK_TYPE_UNKNOWN, COL_SUBSERVICE_UNSUPPORTED,
2145
285
      "unsupported subservice: %u", cmp_col_get_subservice(col));
2146
2147
  /* reserve space for the compression entity header, we will build the
2148
   * header after the compression of the chunk
2149
   */
2150
269
  cmp_size_byte = cmp_ent_build_chunk_header(NULL, chunk_size, &cfg, start_timestamp, 0);
2151
269
  RETURN_ERROR_IF(dst && dst_capacity < cmp_size_byte, SMALL_BUFFER,
2152
269
      "dst_capacity must be at least as large as the minimum size of the compression unit.");
2153
2154
2155
  /* compress one collection after another */
2156
269
  for (read_bytes = 0;
2157
1.41k
       read_bytes <= chunk_size - COLLECTION_HDR_SIZE;
2158
1.25k
       read_bytes += cmp_col_get_size(col)) {
2159
1.25k
    const uint8_t *col_model = NULL;
2160
1.25k
    uint8_t *col_up_model = NULL;
2161
2162
    /* setup pointers for the next collection we want to compress */
2163
1.25k
    col = (const struct collection_hdr *)((const uint8_t *)chunk + read_bytes);
2164
1.25k
    if (chunk_model)
2165
18
      col_model = (const uint8_t *)chunk_model + read_bytes;
2166
1.25k
    if (updated_chunk_model)
2167
18
      col_up_model = (uint8_t *)updated_chunk_model + read_bytes;
2168
2169
1.25k
    RETURN_ERROR_IF(cmp_col_get_chunk_type(col) != chunk_type, CHUNK_SUBSERVICE_INCONSISTENT, "");
2170
2171
    /* chunk size is inconsistent with the sum of sizes in the collection headers */
2172
1.20k
    if (read_bytes + cmp_col_get_size(col) > chunk_size)
2173
0
      break;
2174
2175
1.20k
    cmp_size_byte = cmp_collection((const uint8_t *)col, col_model, col_up_model,
2176
1.20k
                 dst, dst_capacity, &cfg, cmp_size_byte);
2177
1.20k
    FORWARD_IF_ERROR(cmp_size_byte, "error occurred when compressing the collection with offset %u", read_bytes);
2178
1.20k
  }
2179
2180
160
  RETURN_ERROR_IF(read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2181
2182
160
  FORWARD_IF_ERROR(cmp_ent_build_chunk_header(dst, chunk_size, &cfg,
2183
160
              start_timestamp, cmp_size_byte), "");
2184
2185
138
  return cmp_size_byte;
2186
160
}
2187
2188
2189
/**
2190
 * @brief returns the maximum compressed size in a worst-case scenario
2191
 * In case the input data is not compressible
2192
 * This function is primarily useful for memory allocation purposes
2193
 * (destination buffer size).
2194
 *
2195
 * @note if the number of collections is known you can use the
2196
 *  COMPRESS_CHUNK_BOUND macro for compilation-time evaluation
2197
 *  (stack memory allocation for example)
2198
 *
2199
 * @param chunk   pointer to the chunk you want to compress
2200
 * @param chunk_size  size of the chunk in bytes
2201
 *
2202
 * @returns maximum compressed size for a chunk compression on success or an
2203
 *  error code if it fails (which can be tested with cmp_is_error())
2204
 */
2205
2206
uint32_t compress_chunk_cmp_size_bound(const void *chunk, size_t chunk_size)
2207
285
{
2208
285
  int32_t read_bytes;
2209
285
  uint32_t num_col = 0;
2210
285
  size_t bound;
2211
285
  size_t const max_chunk_size = CMP_ENTITY_MAX_ORIGINAL_SIZE
2212
285
    - NON_IMAGETTE_HEADER_SIZE - CMP_COLLECTION_FILD_SIZE;
2213
2214
285
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2215
285
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT, "");
2216
285
  RETURN_ERROR_IF(chunk_size > max_chunk_size, CHUNK_TOO_LARGE,
2217
285
      "chunk_size: %"PRIu32" > max_chunk_size: %"PRIu32"",
2218
285
      chunk_size, max_chunk_size);
2219
2220
  /* count the number of collections in the chunk */
2221
285
  for (read_bytes = 0;
2222
1.66k
       read_bytes <= (int32_t)(chunk_size-COLLECTION_HDR_SIZE);
2223
1.38k
       read_bytes += cmp_col_get_size((const struct collection_hdr *)
2224
1.38k
              ((const uint8_t *)chunk + read_bytes)))
2225
1.38k
    num_col++;
2226
2227
285
  RETURN_ERROR_IF((uint32_t)read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2228
2229
285
  bound = COMPRESS_CHUNK_BOUND_UNSAFE(chunk_size, num_col);
2230
285
  RETURN_ERROR_IF(bound > CMP_ENTITY_MAX_SIZE, CHUNK_TOO_LARGE, "bound: %lu", bound);
2231
2232
285
  return (uint32_t)bound;
2233
285
}
2234
2235
2236
/**
2237
 * @brief set the model id and model counter in the compression entity header
2238
 *
2239
 * @param dst   pointer to the compressed data (starting with a
2240
 *      compression entity header)
2241
 * @param dst_size  byte size of the dst buffer
2242
 * @param model_id  model identifier; for identifying entities that originate
2243
 *      from the same starting model
2244
 * @param model_counter model_counter; counts how many times the model was
2245
 *      updated; for non model mode compression use 0
2246
 *
2247
 * @returns the byte size of the dst buffer (= dst_size) on success or an error
2248
 *  code if it fails (which can be tested with cmp_is_error())
2249
 */
2250
2251
uint32_t compress_chunk_set_model_id_and_counter(void *dst, uint32_t dst_size,
2252
             uint16_t model_id, uint8_t model_counter)
2253
138
{
2254
138
  RETURN_ERROR_IF(dst == NULL, ENTITY_NULL, "");
2255
138
  FORWARD_IF_ERROR(dst_size, "");
2256
138
  RETURN_ERROR_IF(dst_size < GENERIC_HEADER_SIZE, ENTITY_TOO_SMALL,
2257
138
      "dst_size: %"PRIu32"", dst_size);
2258
2259
138
  cmp_ent_set_model_id(dst, model_id);
2260
138
  cmp_ent_set_model_counter(dst, model_counter);
2261
2262
138
  return dst_size;
2263
138
}
2264
2265
2266
/**
2267
 * @brief compress data the same way as the RDCU HW compressor
2268
 *
2269
 * @param rcfg  pointer to a RDCU compression configuration (created with the
2270
 *    rdcu_cfg_create() function, set up with the rdcu_cfg_buffers()
2271
 *    and rdcu_cfg_imagette() functions)
2272
 * @param info  pointer to a compression information structure contains the
2273
 *    metadata of a compression (can be NULL)
2274
 *
2275
 * @returns the bit length of the bitstream on success or an error code if it
2276
 *  fails (which can be tested with cmp_is_error())
2277
 *
2278
 * @warning only the small buffer error in the info.cmp_err field is implemented
2279
 */
2280
2281
uint32_t compress_like_rdcu(const struct rdcu_cfg *rcfg, struct cmp_info *info)
{
	struct cmp_cfg cfg;
	uint32_t cmp_size_bit; /* bit length of the generated bitstream */

	memset(&cfg, 0, sizeof(cfg));

	/* start from a clean info structure so stale fields never leak out */
	if (info)
		memset(info, 0, sizeof(*info));

	/* no RDCU configuration: delegate to compress_data_internal(NULL, 0),
	 * which produces the appropriate error/empty result
	 */
	if (!rcfg)
		return compress_data_internal(NULL, 0);

	/* the RDCU HW compressor only handles imagette data */
	cfg.data_type = DATA_TYPE_IMAGETTE;

	/* translate the RDCU configuration into the generic ICU configuration */
	cfg.src = rcfg->input_buf;
	cfg.model_buf = rcfg->model_buf;
	cfg.samples = rcfg->samples;
	/* buffer_length counts 16-bit samples; stream_size is in bytes */
	cfg.stream_size = (rcfg->buffer_length * sizeof(uint16_t));
	cfg.cmp_mode = rcfg->cmp_mode;
	cfg.model_value = rcfg->model_value;
	cfg.round = rcfg->round;

	if (info) {
		/* mirror the used parameters into the metadata structure */
		info->cmp_err = 0;
		info->cmp_mode_used = (uint8_t)rcfg->cmp_mode;
		info->model_value_used = (uint8_t)rcfg->model_value;
		info->round_used = (uint8_t)rcfg->round;
		info->spill_used = rcfg->spill;
		info->golomb_par_used = rcfg->golomb_par;
		info->samples_used = rcfg->samples;
		info->rdcu_new_model_adr_used = rcfg->rdcu_new_model_adr;
		info->rdcu_cmp_adr_used = rcfg->rdcu_buffer_adr;
		info->cmp_size = 0;
		info->ap1_cmp_size = 0;
		info->ap2_cmp_size = 0;

		/* adaptive pass 1: size-only run (cfg.dst is still NULL) with
		 * the ap1 Golomb/spill parameters, skipped if ap1 is disabled
		 * or the resulting configuration is invalid
		 */
		cfg.cmp_par_imagette = rcfg->ap1_golomb_par;
		cfg.spill_imagette = rcfg->ap1_spill;
		if (cfg.cmp_par_imagette &&
		    cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
			info->ap1_cmp_size = compress_data_internal(&cfg, 0);


		/* adaptive pass 2: same size-only probing with the ap2 parameters */
		cfg.cmp_par_imagette = rcfg->ap2_golomb_par;
		cfg.spill_imagette = rcfg->ap2_spill;
		if (cfg.cmp_par_imagette &&
		    cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
			info->ap2_cmp_size = compress_data_internal(&cfg, 0);
	}

	/* main pass: restore the primary parameters and attach the real
	 * output/model-update buffers (this must happen after the adaptive
	 * passes, which rely on cfg.dst being NULL)
	 */
	cfg.cmp_par_imagette = rcfg->golomb_par;
	cfg.spill_imagette = rcfg->spill;
	cfg.updated_model_buf = rcfg->icu_new_model_buf;
	cfg.dst = rcfg->icu_output_buf;

	FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(&cfg), "");

	cmp_size_bit = compress_data_internal(&cfg, 0);

	if (info) {
		/* only the small-buffer error is mirrored into info->cmp_err
		 * (see the function warning in the header comment)
		 */
		if (cmp_get_error_code(cmp_size_bit) == CMP_ERROR_SMALL_BUFFER)
			info->cmp_err |= 1UL << 0;/* SMALL_BUFFER_ERR_BIT;*/ /* set small buffer error */
		if (cmp_is_error(cmp_size_bit)) {
			info->cmp_size = 0;
			info->ap1_cmp_size = 0;
			info->ap2_cmp_size = 0;
		} else {
			info->cmp_size = cmp_size_bit;
		}
	}

	return cmp_size_bit;
}