diff --git a/examples/example_compress_chunk.c b/examples/example_compress_chunk.c new file mode 100644 index 0000000000000000000000000000000000000000..3291de47590f02c7632d9357583011dad50e3171 --- /dev/null +++ b/examples/example_compress_chunk.c @@ -0,0 +1,277 @@ +/** + * @file example_compress_chunk.c + * @author Dominik Loidolt (dominik.loidolt@univie.ac.at) + * @date Feb 2024 + * + * @copyright GPLv2 + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * @brief demonstration of the chunk compressor + */ + + +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +#include <leon_inttypes.h> + +#include <cmp_chunk.h> +#include <cmp_data_types.h> + + +/* + * The asw_version_id, model_id and counter have to be managed by the ASW here we + * use arbitrary values for demonstration + */ +#define ASW_VERSION_ID 1 +#define MODEL_ID 42 +#define MODEL_COUNTER 1 + + +/** + * @brief This is a dummy implementation of a function returning a current timestamp + */ + +static uint64_t dummy_return_timestamp(void) +{ + return 0x0FF1CC0FFEE; /* arbitrary value */ +} + + +/** + * @brief Demonstration of a 1d chunk compression + * + * Compressing a background/offset chunk in 1d-differencing mode with zero escape + * mechanism + */ + +static int demo_comperss_chunk_1d(void) +{ + struct background background_data[1] = {{0, 1, 0xF0}}; + struct offset offset_data[2] = {{1, 2}, {3, 4}}; + enum { CHUNK_SIZE = 2*COLLECTION_HDR_SIZE + sizeof(background_data) + + sizeof(offset_data) }; + uint32_t chunk[ROUND_UP_TO_4(CHUNK_SIZE)/4] = {0}; /* Do not put large amount of data on the stack! 
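The demo data here is tiny, so a stack array is fine; realistically sized chunks should be placed in statically allocated or heap memory instead. 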
*/ + + uint32_t *compressed_data; + int cmp_size_bytes; + + { /* build a chunk of a background and an offset collection */ + struct collection_hdr *col = (struct collection_hdr *)chunk; + + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_BACKGROUND)) + return -1; + if (cmp_col_set_data_length(col, sizeof(background_data))) + return -1; + memcpy(col->entry, background_data, sizeof(background_data)); + + col = (struct collection_hdr *)((uint8_t *)chunk + cmp_col_get_size(col)); + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_OFFSET)) + return -1; + if (cmp_col_set_data_length(col, sizeof(offset_data))) + return -1; + memcpy(col->entry, offset_data, sizeof(offset_data)); + } + + /* the chunk compression only needs to be initialised once */ + compress_chunk_init(&dummy_return_timestamp, ASW_VERSION_ID); + + { /* compress the chunk */ + struct cmp_par cmp_par; + uint32_t cmp_size_bound; + + /* prepare the compression parameters needed to compress a + * background/offset chunk (arbitrary values) + */ + memset(&cmp_par, 0, sizeof(cmp_par)); + cmp_par.cmp_mode = CMP_MODE_DIFF_ZERO; + cmp_par.nc_offset_mean = 1; + cmp_par.nc_offset_variance = 2; + cmp_par.nc_background_mean = 3; + cmp_par.nc_background_variance = 4; + cmp_par.nc_background_outlier_pixels = 5; + + /* prepare the buffer for the compressed data */ + cmp_size_bound = compress_chunk_cmp_size_bound(chunk, CHUNK_SIZE); + if (!cmp_size_bound) { + printf("Error occurred during compress_chunk_cmp_size_bound()\n"); + /* error handling */ + return -1; + } + compressed_data = malloc(cmp_size_bound); + if (!compressed_data) { + printf("Error occurred during malloc()\n"); + /* error handling */ + return -1; + } + + cmp_size_bytes = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, + compressed_data, cmp_size_bound, + &cmp_par); + if (cmp_size_bytes < 0) { + printf("Error occurred during compress_chunk()\n"); + if (cmp_size_bytes == CMP_ERROR_SMALL_BUF) + printf("The compressed data buffer is too small to hold all compressed data!\n"); + free(compressed_data); + /* error handling */ + return -1; + } + if (compress_chunk_set_model_id_and_counter(compressed_data, cmp_size_bytes, + MODEL_ID, 0)) { + printf("Error occurred during compress_chunk_set_model_id_and_counter()\n"); + free(compressed_data); + /* error handling */ + return -1; + } + } + + { /* have a look at the compressed data */ + int i; + + printf("Here's the compressed data including the compression entity header (size %d):\n", cmp_size_bytes); + for (i = 0; i < cmp_size_bytes; i++) { + const uint8_t *p = (uint8_t *)compressed_data; /* the compression entity is big-endian */ + printf("%02X ", p[i]); + if (i && !((i + 1) % 32)) + printf("\n"); + } + printf("\n\n"); + } + free(compressed_data); + return 0; +} + + +/** + * @brief Demonstration of a model chunk compression + * + * Compressing a background/offset chunk in model mode with multi escape + * mechanism + */ + +static int demo_comperss_chunk_model(void) +{ + struct background background_model[1] = {{0, 1, 0xF0}}; + struct offset offset_model[2] = {{1, 2}, {3, 4}}; + struct background background_data[1] = {{1, 2, 0xFA}}; + struct offset offset_data[2] = {{1, 32}, {23, 42}}; + enum { CHUNK_SIZE = 2*COLLECTION_HDR_SIZE + sizeof(background_data) + + sizeof(offset_data) }; + uint32_t chunk[ROUND_UP_TO_4(CHUNK_SIZE)/4] = {0}; /* Do not put large amount of data on the stack! */ + uint32_t model_chunk[ROUND_UP_TO_4(CHUNK_SIZE)/4] = {0}; /* Do not put large amount of data on the stack! 
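The model and updated-model buffers are as large as the chunk itself, so the same caution applies to them. 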
*/ + uint32_t updated_chunk_model[ROUND_UP_TO_4(CHUNK_SIZE)/4] = {0}; /* Do not put large amount of data on the stack! */ + + /* + * Here we use the COMPRESS_CHUNK_BOUND macro to determine the worst + * case compression size; to do this we need to know the chunk_size and + * the number of collections in the chunk (2 in this demo) + */ + uint32_t compressed_data[COMPRESS_CHUNK_BOUND(CHUNK_SIZE, 2)]; /* Do not put large amount of data on the stack! */ + int cmp_size_bytes; + + { /* build a chunk of a background and an offset collection */ + struct collection_hdr *col = (struct collection_hdr *)chunk; + + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_BACKGROUND)) + return -1; + if (cmp_col_set_data_length(col, sizeof(background_data))) + return -1; + memcpy(col->entry, background_data, sizeof(background_data)); + + col = (struct collection_hdr *)((uint8_t *)chunk + cmp_col_get_size(col)); + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_OFFSET)) + return -1; + if (cmp_col_set_data_length(col, sizeof(offset_data))) + return -1; + memcpy(col->entry, offset_data, sizeof(offset_data)); + + /* build a model_chunk of a background and an offset collection */ + col = (struct collection_hdr *)model_chunk; + + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_BACKGROUND)) + return -1; + if (cmp_col_set_data_length(col, sizeof(background_model))) + return -1; + memcpy(col->entry, background_model, sizeof(background_model)); + + col = (struct collection_hdr *)((uint8_t *)model_chunk + cmp_col_get_size(col)); + if (cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_OFFSET)) + return -1; + if (cmp_col_set_data_length(col, sizeof(offset_model))) + return -1; + memcpy(col->entry, offset_model, sizeof(offset_model)); + } + + /* the chunk compression only needs to be initialised once */ + compress_chunk_init(&dummy_return_timestamp, ASW_VERSION_ID); + + { /* compress the chunk */ + struct cmp_par cmp_par; + + /* prepare the compression parameters needed to compress a + * background/offset chunk + */ + memset(&cmp_par, 0, sizeof(cmp_par)); + cmp_par.cmp_mode = CMP_MODE_MODEL_MULTI; + cmp_par.model_value = 11; + cmp_par.nc_offset_mean = 1; + cmp_par.nc_offset_variance = 2; + cmp_par.nc_background_mean = 3; + cmp_par.nc_background_variance = 4; + cmp_par.nc_background_outlier_pixels = 5; + + cmp_size_bytes = compress_chunk(chunk, CHUNK_SIZE, model_chunk, + updated_chunk_model, compressed_data, + sizeof(compressed_data), &cmp_par); + if (cmp_size_bytes < 0) { + printf("Error occurred during compress_chunk()\n"); + if (cmp_size_bytes == CMP_ERROR_SMALL_BUF) + printf("The compressed data buffer is too small to hold all compressed data!\n"); + /* error handling */ + return -1; + } + if (compress_chunk_set_model_id_and_counter(compressed_data, cmp_size_bytes, + MODEL_ID, MODEL_COUNTER)) { + printf("Error occurred during compress_chunk_set_model_id_and_counter()\n"); + /* error handling */ + return -1; + } + } + + { /* have a look at the compressed data */ + int i; + + printf("Here's the compressed data including the compression entity header (size %d):\n", cmp_size_bytes); + for (i = 0; i < cmp_size_bytes; i++) { + const uint8_t *p = (uint8_t *)compressed_data; /* the compression entity is big-endian */ + printf("%02X ", p[i]); + if (i && !((i + 1) % 32)) + printf("\n"); + } + printf("\n"); + } + + return 0; +} + + +int main(void) +{ + int error = 0; + + error |= demo_comperss_chunk_1d(); + /* error |= demo_comperss_chunk_model(); */ + + if (error) + return EXIT_FAILURE; + return EXIT_SUCCESS; +} diff --git 
a/examples/meson.build b/examples/meson.build index d9cced11fb2ba81d26dde91b4d16d430b7ceceb0..d83ac4014ab4ab398fa868f68db5cd3d55a3f9ab 100644 --- a/examples/meson.build +++ b/examples/meson.build @@ -13,9 +13,19 @@ example_cmp_rdcu_src = files([ 'example_cmp_rdcu.c' ]) - -example_cmp_rdcu_lib = static_library('example_cmp_rdcu', +example_cmp_rdcu_lib = executable('example_cmp_rdcu', sources : example_cmp_rdcu_src, include_directories : incdir, link_with : cmp_lib, ) + + +example_compress_chunk_src = files([ + 'example_compress_chunk.c' +]) + +example_cmp_rdcu_lib = executable('example_compress_chunk', + sources : example_compress_chunk_src, + include_directories : incdir, + link_with : cmp_lib, +) diff --git a/lib/cmp_chunk.h b/lib/cmp_chunk.h new file mode 100644 index 0000000000000000000000000000000000000000..a71200016020b962ab2d61d730f47e5e9df97f2b --- /dev/null +++ b/lib/cmp_chunk.h @@ -0,0 +1,133 @@ +/** + * @file cmp_chunk.h + * @author Dominik Loidolt (dominik.loidolt@univie.ac.at) + * @date 2024 + * + * @copyright GPLv2 + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * @brief software chunk compression library + * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001 + */ + +#ifndef CMP_CHUNK_H +#define CMP_CHUNK_H + +#include "common/cmp_entity.h" + + +#define ROUND_UP_TO_4(x) ((((x)+3)*4)/4) + + +/** + * @brief returns the maximum compressed size in a worst case scenario + * + * In case the input data is not compressible + * This macro is primarily useful for compilation-time evaluation + * (stack memory allocation for example) + * + * @note if the number of collections is not know you can use the + * compress_chunk_cmp_size_bound function + * + * @param chunk_size size in bytes of the chunk + * @param num_col number of collections in the chunk + * + * @returns maximum compressed size for chunk compression; 0 on error + */ + +#define COMPRESS_CHUNK_BOUND(chunk_size, num_col) ( \ + (uint32_t)ROUND_UP_TO_4(NON_IMAGETTE_HEADER_SIZE+(chunk_size)+(num_col)*CMP_COLLECTION_FILD_SIZE) > (uint32_t)CMP_ENTITY_MAX_SIZE ? 0 \ + : ROUND_UP_TO_4(NON_IMAGETTE_HEADER_SIZE+(chunk_size)+(num_col)*CMP_COLLECTION_FILD_SIZE) \ +) + + +/** + * @brief returns the maximum compressed size in a worst case scenario + * + * In case the input data is not compressible + * This function is primarily useful for memory allocation purposes + * (destination buffer size). 
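+ * A typical usage is to allocate the destination buffer with the size returned + * by this function, as done in examples/example_compress_chunk.c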
+ * + * @note if the number of collections is known you can use the + * COMPRESS_CHUNK_BOUND macro for compilation-time evaluation + * (stack memory allocation for example) + * + * @param chunk pointer to the chunk you want compress + * @param chunk_size size of the chunk in bytes + * + * @returns maximum compressed size for a chunk compression; 0 on error + */ + +uint32_t compress_chunk_cmp_size_bound(void *chunk, uint32_t chunk_size); + + +/** + * @brief initialise the compress_chunk() function + * + * If not initialised the compress_chunk() function sets the timestamps and + * version_id in the compression entity header to zero + * + * @param return_timestamp pointer to a function returning a current 48-bit + * timestamp + * @param version_id version identifier of the applications software + */ + +void compress_chunk_init(uint64_t(return_timestamp)(void), uint32_t version_id); + + +/** + * @brief compress a data chunk consisting of put together data collections + * + * @param chunk pointer to the chunk to be compressed + * @param chunk_size byte size of the chunk + * @param chunk_model pointer to a model of a chunk; has the same size + * as the chunk (can be NULL if no model compression + * mode is used) + * @param updated_chunk_model pointer to store the updated model for the next + * model mode compression; has the same size as the + * chunk (can be the same as the model_of_data + * buffer for in-place update or NULL if updated + * model is not needed) + * @param dst destination pointer to the compressed data + * buffer; has to be 4-byte aligned (can be NULL) + * @param dst_capacity capacity of the dst buffer; it's recommended to + * provide a dst_capacity >= + * compress_chunk_cmp_size_bound(chunk, chunk_size) + * as it eliminates one potential failure scenario: + * not enough space in the dst buffer to write the + * compressed data; size is round down to a multiple + * of 4 + * @returns the byte size of the compressed_data buffer on success; negative on + * error, CMP_ERROR_SMALL_BUF (-2) if the compressed data buffer is too + * small to hold the whole compressed data + */ + +int compress_chunk(uint32_t *chunk, uint32_t chunk_size, uint32_t *chunk_model, + uint32_t *updated_chunk_model, uint32_t *dst, uint32_t dst_capacity, + const struct cmp_par *cmp_par); + +/** + * @brief set the model id and model counter in the compression entity header + * + * @param dst pointer to the compressed data starting with a + * compression entity header + * @param dst_size byte size of the dst buffer + * @param model_id model identifier; for identifying entities that originate + * from the same starting model + * @param model_counter model_counter; counts how many times the model was + * updated; for non model mode compression use 0 + * + * @returns 0 on success, otherwise error + */ + +int compress_chunk_set_model_id_and_counter(uint32_t *dst, int dst_size, + uint16_t model_id, uint8_t model_counter); + +#endif /* CMP_CHUNK_H */ diff --git a/lib/common/cmp_cal_up_model.h b/lib/common/cmp_cal_up_model.h index bed7fa12c55a22d8b36c44ef8d69bb0d5ee7a10b..afd8e0c8f15e1c321c6f4b5c3151763fcda7c4ba 100644 --- a/lib/common/cmp_cal_up_model.h +++ b/lib/common/cmp_cal_up_model.h @@ -56,6 +56,8 @@ * @brief implantation of the model update equation * @note check before that model_value is not greater than MAX_MODEL_VALUE * + * @warning: Do not use this macro with types larger than uint32_t + * * @param data data to process * @param model (current) model of the data to process * @param model_value model weighting 
parameter @@ -64,20 +66,20 @@ * @returns (new) updated model */ -#define cmp_up_model(data, model, model_value, round) \ - __extension__ \ - ({ \ - __typeof__(data) __ret; \ - switch (sizeof(data)) { \ - case sizeof(uint8_t): \ - case sizeof(uint16_t): \ - __ret = (__typeof__(__ret))cmp_up_model16(data, model, model_value, round); \ - break; \ - case sizeof(uint32_t): \ - __ret = (__typeof__(__ret))cmp_up_model32(data, model, model_value, round); \ - break; \ - } \ - __ret; \ +#define cmp_up_model(data, model, model_value, round) \ + __extension__ \ + ({ \ + __typeof__(data) __ret; \ + switch (sizeof(data)) { \ + case sizeof(uint8_t): \ + case sizeof(uint16_t): \ + __ret = (__typeof__(__ret))cmp_up_model16(data, model, model_value, round); \ + break; \ + case sizeof(uint32_t): \ + __ret = (__typeof__(__ret))cmp_up_model32(data, model, model_value, round); \ + break; \ + } \ + __ret; \ }) diff --git a/lib/common/cmp_data_types.c b/lib/common/cmp_data_types.c index 4e37fe527c3b031ea0f83565dac9c0c379df5e08..a47a1b85718091c4457225b6540f35f19bb68515 100644 --- a/lib/common/cmp_data_types.c +++ b/lib/common/cmp_data_types.c @@ -54,7 +54,7 @@ uint64_t cmp_col_get_timestamp(const struct collection_hdr *col) uint16_t cmp_col_get_configuration_id(const struct collection_hdr *col) { - return (be16_to_cpu(col->configuration_id)); + return be16_to_cpu(col->configuration_id); } @@ -68,7 +68,7 @@ uint16_t cmp_col_get_configuration_id(const struct collection_hdr *col) uint16_t cmp_col_get_col_id(const struct collection_hdr *col) { - return (be16_to_cpu(col->collection_id)); + return be16_to_cpu(col->collection_id); } @@ -151,12 +151,26 @@ uint8_t cmp_col_get_sequence_num(const struct collection_hdr *col) * * @param col pointer to a collection header * - * @returns the collection length in bytes (TBC: without collection header) + * @returns the collection length in bytes */ -uint16_t cmp_col_get_length(const struct collection_hdr *col) +uint16_t cmp_col_get_data_length(const struct collection_hdr *col) { - return (be16_to_cpu(col->collection_length)); + return be16_to_cpu(col->collection_length); +} + + +/** + * @brief get the entire collection size (header plus data size) + * + * @param col pointer to a collection header + * + * @returns the collection size in bytes + */ + +uint32_t cmp_col_get_size(const struct collection_hdr *col) +{ + return COLLECTION_HDR_SIZE + cmp_col_get_data_length(col); } @@ -338,7 +352,7 @@ int cmp_col_set_sequence_num(struct collection_hdr *col, uint8_t sequence_num) * @returns 0 on success, otherwise error */ -int cmp_col_set_length(struct collection_hdr *col, uint16_t length) +int cmp_col_set_data_length(struct collection_hdr *col, uint16_t length) { if (!col) return -1; @@ -347,6 +361,141 @@ int cmp_col_set_length(struct collection_hdr *col, uint16_t length) return 0; } + +/* TODO: doc string */ + +enum cmp_data_type convert_subservice_to_cmp_data_type(uint8_t subservice) +{ + switch (subservice) { + case SST_NCxx_S_SCIENCE_IMAGETTE: + return DATA_TYPE_IMAGETTE; + case SST_NCxx_S_SCIENCE_SAT_IMAGETTE: + return DATA_TYPE_SAT_IMAGETTE; + case SST_NCxx_S_SCIENCE_OFFSET: + return DATA_TYPE_OFFSET; + case SST_NCxx_S_SCIENCE_BACKGROUND: + return DATA_TYPE_BACKGROUND; + case SST_NCxx_S_SCIENCE_SMEARING: + return DATA_TYPE_SMEARING; + case SST_NCxx_S_SCIENCE_S_FX: + return DATA_TYPE_S_FX; + case SST_NCxx_S_SCIENCE_S_FX_EFX: + return DATA_TYPE_S_FX_EFX; + case SST_NCxx_S_SCIENCE_S_FX_NCOB: + return DATA_TYPE_S_FX_NCOB; + case SST_NCxx_S_SCIENCE_S_FX_EFX_NCOB_ECOB: + return 
DATA_TYPE_S_FX_EFX_NCOB_ECOB; + case SST_NCxx_S_SCIENCE_L_FX: + return DATA_TYPE_L_FX; + case SST_NCxx_S_SCIENCE_L_FX_EFX: + return DATA_TYPE_L_FX_EFX; + case SST_NCxx_S_SCIENCE_L_FX_NCOB: + return DATA_TYPE_L_FX_NCOB; + case SST_NCxx_S_SCIENCE_L_FX_EFX_NCOB_ECOB: + return DATA_TYPE_L_FX_EFX_NCOB_ECOB; + case SST_NCxx_S_SCIENCE_F_FX: + return DATA_TYPE_F_FX; + case SST_NCxx_S_SCIENCE_F_FX_EFX: + return DATA_TYPE_F_FX_EFX; + case SST_NCxx_S_SCIENCE_F_FX_NCOB: + return DATA_TYPE_F_FX_NCOB; + case SST_NCxx_S_SCIENCE_F_FX_EFX_NCOB_ECOB: + return DATA_TYPE_F_FX_EFX_NCOB_ECOB; + case SST_FCx_S_SCIENCE_IMAGETTE: + return DATA_TYPE_F_CAM_IMAGETTE; + case SST_FCx_S_SCIENCE_OFFSET_VALUES: + return DATA_TYPE_F_CAM_OFFSET; + /* TODO: SST_FCx_S_BACKGROUND_VALUES and SST_NCxx_S_SCIENCE_IMAGETTE has + * the same subservice number*/ + /* case SST_FCx_S_BACKGROUND_VALUES: */ + /* return DATA_TYPE_F_CAM_BACKGROUND; */ + /* break; */ + default: + return DATA_TYPE_UNKNOWN; + }; +} + + +/* TODO: doc string */ + +uint8_t convert_data_type_to_subservice(enum cmp_data_type data_type) +{ + uint8_t sst = 0; + + switch (data_type) { + case DATA_TYPE_IMAGETTE: + case DATA_TYPE_IMAGETTE_ADAPTIVE: + sst = SST_NCxx_S_SCIENCE_IMAGETTE; + break; + case DATA_TYPE_SAT_IMAGETTE: + case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE: + sst = SST_NCxx_S_SCIENCE_SAT_IMAGETTE; + break; + case DATA_TYPE_OFFSET: + sst = SST_NCxx_S_SCIENCE_OFFSET; + break; + case DATA_TYPE_BACKGROUND: + sst = SST_NCxx_S_SCIENCE_BACKGROUND; + break; + case DATA_TYPE_SMEARING: + sst = SST_NCxx_S_SCIENCE_SMEARING; + break; + case DATA_TYPE_S_FX: + sst = SST_NCxx_S_SCIENCE_S_FX; + break; + case DATA_TYPE_S_FX_EFX: + sst = SST_NCxx_S_SCIENCE_S_FX_EFX; + break; + case DATA_TYPE_S_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_S_FX_NCOB; + break; + case DATA_TYPE_S_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_S_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_L_FX: + sst = SST_NCxx_S_SCIENCE_L_FX; + break; + case DATA_TYPE_L_FX_EFX: + sst = SST_NCxx_S_SCIENCE_L_FX_EFX; + break; + case DATA_TYPE_L_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_L_FX_NCOB; + break; + case DATA_TYPE_L_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_L_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_F_FX: + sst = SST_NCxx_S_SCIENCE_F_FX; + break; + case DATA_TYPE_F_FX_EFX: + sst = SST_NCxx_S_SCIENCE_F_FX_EFX; + break; + case DATA_TYPE_F_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_F_FX_NCOB; + break; + case DATA_TYPE_F_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_F_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_F_CAM_IMAGETTE: + case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE: + sst = SST_FCx_S_SCIENCE_IMAGETTE; + break; + case DATA_TYPE_F_CAM_OFFSET: + sst = SST_FCx_S_SCIENCE_OFFSET_VALUES; + break; + case DATA_TYPE_F_CAM_BACKGROUND: + sst = SST_FCx_S_BACKGROUND_VALUES; + break; + default: + case DATA_TYPE_UNKNOWN: + debug_print("Error: Unknown compression data type!\n"); + sst = (uint8_t)-1; + }; + + return sst; +} + + /** * @brief calculate the size of a sample for the different compression data type * diff --git a/lib/common/cmp_data_types.h b/lib/common/cmp_data_types.h index dbed58341e849eb4db29ff6a9faaf168342bf528..f0a700d415410cb2c2a74fb1720b934b8a4e595a 100644 --- a/lib/common/cmp_data_types.h +++ b/lib/common/cmp_data_types.h @@ -68,6 +68,11 @@ /* size of a collection (multi entry) header */ #define COLLECTION_HDR_SIZE 12 +enum col_packet_type { + COL_WINDOW_PKT_TYPE = 0, + COL_SCI_PKTS_TYPE = 1 +}; + /** * @brief source data header structure for collection packet @@ -75,8 +80,9 @@ * entries of the same science data * @see 
PLATO-LESIA-PL-RP-0031(N-DPU->ICU data rate) */ -union collection_id{ +union collection_id { uint16_t collection_id; + __extension__ struct { #ifdef __LITTLE_ENDIAN uint16_t sequence_num:7; @@ -90,7 +96,7 @@ union collection_id{ uint16_t sequence_num:7; #endif } field __attribute__((packed)); -}__attribute__((packed)); +} __attribute__((packed)); __extension__ struct collection_hdr { @@ -102,6 +108,7 @@ struct collection_hdr { } __attribute__((packed)); compile_time_assert(sizeof(struct collection_hdr) == COLLECTION_HDR_SIZE, N_DPU_ICU_COLLECTION_HDR_SIZE_IS_NOT_CORRECT); compile_time_assert(sizeof(struct collection_hdr) % sizeof(uint32_t) == 0, N_DPU_ICU_COLLECTION_HDR_NOT_4_BYTE_ALLIED); +/* TODO: compile_time_assert(sizeof(struct collection_hdr.collection_id) == sizeof(union collection_id), N_DPU_ICU_COLLECTION_COLLECTION_ID_DO_NOT_MATCH); */ /** @@ -300,7 +307,8 @@ uint8_t cmp_col_get_subservice(const struct collection_hdr *col); uint8_t cmp_col_get_ccd_id(const struct collection_hdr *col); uint8_t cmp_col_get_sequence_num(const struct collection_hdr *col); -uint16_t cmp_col_get_length(const struct collection_hdr *col); +uint16_t cmp_col_get_data_length(const struct collection_hdr *col); +uint32_t cmp_col_get_size(const struct collection_hdr *col); /* collection header getter functions */ @@ -313,8 +321,9 @@ int cmp_col_set_subservice(struct collection_hdr *col, uint8_t subservice); int cmp_col_set_ccd_id(struct collection_hdr *col, uint8_t ccd_id); int cmp_col_set_sequence_num(struct collection_hdr *col, uint8_t sequence_num); -int cmp_col_set_length(struct collection_hdr *col, uint16_t length); +int cmp_col_set_data_length(struct collection_hdr *col, uint16_t length); +enum cmp_data_type convert_subservice_to_cmp_data_type(uint8_t subservice); size_t size_of_a_sample(enum cmp_data_type data_type); uint32_t cmp_cal_size_of_data(uint32_t samples, enum cmp_data_type data_type); diff --git a/lib/common/cmp_entity.c b/lib/common/cmp_entity.c index 4517dc81e4c099976a844f09b64e338418dce78a..76710b609a20236f925183343bddbd2b15505ec0 100644 --- a/lib/common/cmp_entity.c +++ b/lib/common/cmp_entity.c @@ -99,6 +99,7 @@ uint32_t cmp_ent_cal_hdr_size(enum cmp_data_type data_type, int raw_mode_flag) case DATA_TYPE_F_FX_EFX_NCOB_ECOB: case DATA_TYPE_F_CAM_OFFSET: case DATA_TYPE_F_CAM_BACKGROUND: + case DATA_TYPE_CHUNK: size = NON_IMAGETTE_HEADER_SIZE; break; case DATA_TYPE_UNKNOWN: @@ -1676,6 +1677,7 @@ void *cmp_ent_get_data_buf(struct cmp_entity *ent) case DATA_TYPE_F_FX_EFX_NCOB_ECOB: case DATA_TYPE_F_CAM_OFFSET: case DATA_TYPE_F_CAM_BACKGROUND: + case DATA_TYPE_CHUNK: return ent->non_ima.cmp_data; /* LCOV_EXCL_START */ case DATA_TYPE_UNKNOWN: @@ -1865,30 +1867,6 @@ int cmp_ent_write_cmp_pars(struct cmp_entity *ent, const struct cmp_cfg *cfg, case DATA_TYPE_BACKGROUND: case DATA_TYPE_F_CAM_BACKGROUND: case DATA_TYPE_SMEARING: - if (cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_mean)) - return -1; - if (cmp_ent_set_non_ima_spill1(ent, cfg->spill_mean)) - return -1; - - if (cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_variance)) - return -1; - if (cmp_ent_set_non_ima_spill2(ent, cfg->spill_variance)) - return -1; - - if (cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_pixels_error)) - return -1; - if (cmp_ent_set_non_ima_spill3(ent, cfg->spill_pixels_error)) - return -1; - - cmp_ent_set_non_ima_cmp_par4(ent, 0); - cmp_ent_set_non_ima_spill4(ent, 0); - - cmp_ent_set_non_ima_cmp_par5(ent, 0); - cmp_ent_set_non_ima_spill5(ent, 0); - - cmp_ent_set_non_ima_cmp_par6(ent, 0); - 
cmp_ent_set_non_ima_spill6(ent, 0); - break; case DATA_TYPE_S_FX: case DATA_TYPE_S_FX_EFX: case DATA_TYPE_S_FX_NCOB: @@ -1901,34 +1879,34 @@ int cmp_ent_write_cmp_pars(struct cmp_entity *ent, const struct cmp_cfg *cfg, case DATA_TYPE_F_FX_EFX: case DATA_TYPE_F_FX_NCOB: case DATA_TYPE_F_FX_EFX_NCOB_ECOB: - if (cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_exp_flags)) + if (cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1)) return -1; - if (cmp_ent_set_non_ima_spill1(ent, cfg->spill_exp_flags)) + if (cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1)) return -1; - if (cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_fx)) + if (cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2)) return -1; - if (cmp_ent_set_non_ima_spill2(ent, cfg->spill_fx)) + if (cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2)) return -1; - if (cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_ncob)) + if (cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3)) return -1; - if (cmp_ent_set_non_ima_spill3(ent, cfg->spill_ncob)) + if (cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3)) return -1; - if (cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_efx)) + if (cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4)) return -1; - if (cmp_ent_set_non_ima_spill4(ent, cfg->spill_efx)) + if (cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4)) return -1; - if (cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_ecob)) + if (cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5)) return -1; - if (cmp_ent_set_non_ima_spill5(ent, cfg->spill_ecob)) + if (cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5)) return -1; - if (cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_fx_cob_variance)) + if (cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6)) return -1; - if (cmp_ent_set_non_ima_spill6(ent, cfg->spill_fx_cob_variance)) + if (cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6)) return -1; break; @@ -2413,6 +2391,50 @@ static void cmp_ent_parese_adaptive_imagette_header(const struct cmp_entity *ent } +/** + * @brief parse the non-imagette specific compressed entity header + * + * @param ent pointer to a compression entity + */ + +static void cmp_ent_parese_non_imagette_header(const struct cmp_entity *ent) +{ + uint32_t spill_1_used, cmp_par_1_used, spill_2_used, cmp_par_2_used, + spill_3_used, cmp_par_3_used, spill_4_used, cmp_par_4_used, + spill_5_used, cmp_par_5_used; + + spill_1_used = cmp_ent_get_non_ima_spill1(ent); + printf("Used Spillover Threshold Parameter 1: %" PRIu32 "\n", spill_1_used); + + cmp_par_1_used = cmp_ent_get_non_ima_cmp_par1(ent); + printf("Used Compression Parameter 1: %" PRIu32 "\n", cmp_par_1_used); + + spill_2_used = cmp_ent_get_non_ima_spill2(ent); + printf("Used Spillover Threshold Parameter 2: %" PRIu32 "\n", spill_2_used); + + cmp_par_2_used = cmp_ent_get_non_ima_cmp_par2(ent); + printf("Used Compression Parameter 2: %" PRIu32 "\n", cmp_par_2_used); + + spill_3_used = cmp_ent_get_non_ima_spill3(ent); + printf("Used Spillover Threshold Parameter 3: %" PRIu32 "\n", spill_3_used); + + cmp_par_3_used = cmp_ent_get_non_ima_cmp_par3(ent); + printf("Used Compression Parameter 3: %" PRIu32 "\n", cmp_par_3_used); + + spill_4_used = cmp_ent_get_non_ima_spill4(ent); + printf("Used Spillover Threshold Parameter 4: %" PRIu32 "\n", spill_4_used); + + cmp_par_4_used = cmp_ent_get_non_ima_cmp_par4(ent); + printf("Used Compression Parameter 4: %" PRIu32 "\n", cmp_par_4_used); + + spill_5_used = cmp_ent_get_non_ima_spill5(ent); + printf("Used Spillover Threshold Parameter 5: %" PRIu32 "\n", spill_5_used); + + cmp_par_5_used = 
cmp_ent_get_non_ima_cmp_par5(ent); + printf("Used Compression Parameter 5: %" PRIu32 "\n", cmp_par_5_used); +} + + /** * @brief parse the specific compressed entity header * @@ -2439,6 +2461,26 @@ static void cmp_ent_parese_specific_header(const struct cmp_entity *ent) case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE: cmp_ent_parese_adaptive_imagette_header(ent); break; + case DATA_TYPE_OFFSET: + case DATA_TYPE_BACKGROUND: + case DATA_TYPE_SMEARING: + case DATA_TYPE_S_FX: + case DATA_TYPE_S_FX_EFX: + case DATA_TYPE_S_FX_NCOB: + case DATA_TYPE_S_FX_EFX_NCOB_ECOB: + case DATA_TYPE_L_FX: + case DATA_TYPE_L_FX_EFX: + case DATA_TYPE_L_FX_NCOB: + case DATA_TYPE_L_FX_EFX_NCOB_ECOB: + case DATA_TYPE_F_FX: + case DATA_TYPE_F_FX_EFX: + case DATA_TYPE_F_FX_NCOB: + case DATA_TYPE_F_FX_EFX_NCOB_ECOB: + case DATA_TYPE_F_CAM_OFFSET: + case DATA_TYPE_F_CAM_BACKGROUND: + case DATA_TYPE_CHUNK: + cmp_ent_parese_non_imagette_header(ent); + break; default: printf("For this data product type no parse functions is implemented!\n"); break; diff --git a/lib/common/cmp_entity.h b/lib/common/cmp_entity.h index 93e27e66d838d7978d7476ec07177163921c0bac..33c0edbfd8b3673c23f864231620fa3c944759c7 100644 --- a/lib/common/cmp_entity.h +++ b/lib/common/cmp_entity.h @@ -46,6 +46,7 @@ (GENERIC_HEADER_SIZE + SPECIFIC_NON_IMAGETTE_HEADER_SIZE) #define CMP_ENTITY_MAX_SIZE 0xFFFFFFUL +#define CMP_ENTITY_MAX_ORIGINAL_SIZE 0xFFFFFFUL #define RAW_BIT_DATA_TYPE_POS 15U diff --git a/lib/common/cmp_support.c b/lib/common/cmp_support.c index e13040d0d983ad2ab17980bb0bfeeb25cfd37e03..ce5134618b008258731ebdca8edc0f174f644dc2 100644 --- a/lib/common/cmp_support.c +++ b/lib/common/cmp_support.c @@ -69,7 +69,7 @@ int is_a_pow_of_2(unsigned int v) int cmp_data_type_is_invalid(enum cmp_data_type data_type) { - if (data_type <= DATA_TYPE_UNKNOWN || data_type > DATA_TYPE_F_CAM_BACKGROUND) + if (data_type <= DATA_TYPE_UNKNOWN || data_type > DATA_TYPE_CHUNK) return 1; return 0; @@ -407,9 +407,9 @@ unsigned int cmp_bit_to_4byte(unsigned int cmp_size_bit) int cmp_cfg_gen_par_is_invalid(const struct cmp_cfg *cfg, enum check_opt opt) { int cfg_invalid = 0; - int invalid_data_type = 1; - int unsupported_cmp_mode = 1; - int check_model_value = 1; + int invalid_data_type; + int unsupported_cmp_mode; + int check_model_value; uint32_t max_round_value = 0; const char *str = ""; @@ -881,20 +881,35 @@ int cmp_cfg_aux_is_invalid(const struct cmp_cfg *cfg) if (!cfg) return 1; - if (!cmp_aux_data_type_is_used(cfg->data_type)) { + switch (cfg->data_type) { + case DATA_TYPE_OFFSET: + case DATA_TYPE_F_CAM_OFFSET: + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_offset_mean, cfg->spill_offset_mean, + cfg->cmp_mode, cfg->data_type, "offset mean"); + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_offset_variance, cfg->spill_offset_variance, + cfg->cmp_mode, cfg->data_type, "offset variance"); + break; + case DATA_TYPE_BACKGROUND: + case DATA_TYPE_F_CAM_BACKGROUND: + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_background_mean, cfg->spill_background_mean, + cfg->cmp_mode, cfg->data_type, "background mean"); + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_background_variance, cfg->spill_background_variance, + cfg->cmp_mode, cfg->data_type, "background variance"); + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error, + cfg->cmp_mode, cfg->data_type, "background outlier pixls num"); + break; + case DATA_TYPE_SMEARING: + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean, + 
cfg->cmp_mode, cfg->data_type, "smearing mean"); + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance, + cfg->cmp_mode, cfg->data_type, "smearing variance"); + cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error, + cfg->cmp_mode, cfg->data_type, "smearing outlier pixls num"); + break; + default: debug_print("Error: The compression data type is not an auxiliary science compression data type.\n"); cfg_invalid++; } - - cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_mean, cfg->spill_mean, - cfg->cmp_mode, cfg->data_type, "mean"); - cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_variance, cfg->spill_variance, - cfg->cmp_mode, cfg->data_type, "variance"); - - if (cfg->data_type != DATA_TYPE_OFFSET && cfg->data_type != DATA_TYPE_F_CAM_OFFSET) - cfg_invalid += cmp_pars_are_invalid(cfg->cmp_par_pixels_error, cfg->spill_pixels_error, - cfg->cmp_mode, cfg->data_type, "outlier pixls num"); - return cfg_invalid; } diff --git a/lib/common/cmp_support.h b/lib/common/cmp_support.h index 6e65e2142ae44153040655252a26da9703346fbc..23ed67b06f6e4479ae71e89e8ac68e58c66b0cf6 100644 --- a/lib/common/cmp_support.h +++ b/lib/common/cmp_support.h @@ -25,6 +25,8 @@ #include "cmp_max_used_bits.h" #include "cmp_cal_up_model.h" +#define CMP_COLLECTION_FILD_SIZE 2 + /* return code if the bitstream buffer is too small to store the whole bitstream */ #define CMP_ERROR_SMALL_BUF -2 @@ -129,7 +131,8 @@ enum cmp_data_type { DATA_TYPE_F_CAM_IMAGETTE, DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE, DATA_TYPE_F_CAM_OFFSET, - DATA_TYPE_F_CAM_BACKGROUND + DATA_TYPE_F_CAM_BACKGROUND, + DATA_TYPE_CHUNK }; @@ -147,6 +150,46 @@ enum cmp_mode { }; +struct cmp_par { + enum cmp_mode cmp_mode; /**< compression mode parameter */ + uint32_t model_value; /**< model weighting parameter */ + uint32_t lossy_par; /**< lossy compression parameter */ + + uint32_t nc_imagette; /**< compression parameter for imagette data compression */ + + uint32_t s_exp_flags; /**< compression parameter for exposure flags compression */ + uint32_t s_fx; /**< compression parameter for normal flux compression */ + uint32_t s_ncob; /**< compression parameter for normal center of brightness compression */ + uint32_t s_efx; /**< compression parameter for extended flux compression */ + uint32_t s_ecob; /**< compression parameter for executed center of brightness compression */ + + uint32_t l_exp_flags; /**< compression parameter for exposure flags compression */ + uint32_t l_fx; /**< compression parameter for normal flux compression */ + uint32_t l_ncob; /**< compression parameter for normal center of brightness compression */ + uint32_t l_efx; /**< compression parameter for extended flux compression */ + uint32_t l_ecob; /**< compression parameter for executed center of brightness compression */ + uint32_t l_fx_cob_variance; /**< compression parameter for flux/COB variance compression */ + + uint32_t saturated_imagette; /**< compression parameter for saturated imagette data compression */ + + uint32_t nc_offset_mean; + uint32_t nc_offset_variance; + uint32_t nc_background_mean; + uint32_t nc_background_variance; + uint32_t nc_background_outlier_pixels; + + uint32_t smearing_mean; + uint32_t smearing_variance_mean; + uint32_t smearing_outlier_pixels; + + uint32_t fc_imagette; + uint32_t fc_offset_mean; + uint32_t fc_offset_variance; + uint32_t fc_background_mean; + uint32_t fc_background_variance; + uint32_t fc_background_outlier_pixels; +}; + /** * @brief The cmp_cfg structure can 
contain the complete configuration of the HW as * well as the SW compressor. @@ -154,6 +197,7 @@ enum cmp_mode { * @note the rdcu_***_adr parameters are ignored for SW compression */ +__extension__ struct cmp_cfg { void *input_buf; /**< Pointer to the data to compress buffer */ void *model_buf; /**< Pointer to the model buffer */ @@ -176,31 +220,84 @@ struct cmp_cfg { */ uint32_t model_value; /**< Model weighting parameter */ uint32_t round; /**< lossy compression parameter */ - uint32_t golomb_par; /**< Golomb parameter for imagette data compression */ - uint32_t spill; /**< Spillover threshold parameter for imagette compression */ - uint32_t ap1_golomb_par; /**< Adaptive 1 spillover threshold for imagette data; HW only */ - uint32_t ap1_spill; /**< Adaptive 1 Golomb parameter for imagette data; HW only */ - uint32_t ap2_golomb_par; /**< Adaptive 2 spillover threshold for imagette data; HW only */ - uint32_t ap2_spill; /**< Adaptive 2 Golomb parameter; HW only */ - uint32_t cmp_par_exp_flags; /**< Compression parameter for exposure flags compression */ - uint32_t spill_exp_flags; /**< Spillover threshold parameter for exposure flags compression */ - uint32_t cmp_par_fx; /**< Compression parameter for normal flux compression */ - uint32_t spill_fx; /**< Spillover threshold parameter for normal flux compression */ - uint32_t cmp_par_ncob; /**< Compression parameter for normal center of brightness compression */ - uint32_t spill_ncob; /**< Spillover threshold parameter for normal center of brightness compression */ - uint32_t cmp_par_efx; /**< Compression parameter for extended flux compression */ - uint32_t spill_efx; /**< Spillover threshold parameter for extended flux compression */ - uint32_t cmp_par_ecob; /**< Compression parameter for executed center of brightness compression */ - uint32_t spill_ecob; /**< Spillover threshold parameter for executed center of brightness compression */ - uint32_t cmp_par_fx_cob_variance; /**< Compression parameter for flux/COB variance compression */ - uint32_t spill_fx_cob_variance; /**< Spillover threshold parameter for flux/COB variance compression */ - uint32_t cmp_par_mean; /**< Compression parameter for auxiliary science mean compression */ - uint32_t spill_mean; /**< Spillover threshold parameter for auxiliary science mean compression */ - uint32_t cmp_par_variance; /**< Compression parameter for auxiliary science variance compression */ - uint32_t spill_variance; /**< Spillover threshold parameter for auxiliary science variance compression */ - uint32_t cmp_par_pixels_error; /**< Compression parameter for auxiliary science outlier pixels number compression */ - uint32_t spill_pixels_error; /**< Spillover threshold parameter for auxiliary science outlier pixels number compression */ - const struct cmp_max_used_bits *max_used_bits; /**< the maximum length of the different data products types in bits */ + union { + uint32_t cmp_par_1; + uint32_t golomb_par; /* TODO: remove this */ /**< Golomb parameter for imagette data compression */ + uint32_t cmp_par_imagette; /**< Golomb parameter for imagette data compression */ + uint32_t cmp_par_exp_flags; /**< Compression parameter for exposure flags compression */ + }; + union { + uint32_t spill_par_1; + uint32_t spill; /* TODO: remove this */ /**< Spillover threshold parameter for imagette data compression */ + uint32_t spill_imagette; /**< Spillover threshold parameter for imagette data compression */ + uint32_t spill_exp_flags; /**< Spillover threshold parameter for exposure flags compression */ + }; + + 
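/* each of the following cmp_par_N/spill_par_N pairs aliases the data-type-specific parameter names that end up in the same compression entity header field */ +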
union { + uint32_t cmp_par_2; + uint32_t ap1_golomb_par; /**< Adaptive 2 spillover threshold for imagette data; HW only */ + uint32_t cmp_par_fx; /**< Compression parameter for normal flux compression */ + uint32_t cmp_par_offset_mean; /**< Compression parameter for auxiliary science mean compression */ + }; + union { + uint32_t spill_par_2; + uint32_t ap1_spill; /**< Adaptive 2 Golomb parameter; HW only */ + uint32_t spill_fx; /**< Spillover threshold parameter for normal flux compression */ + uint32_t spill_offset_mean; /**< Spillover threshold parameter for auxiliary science mean compression */ + }; + + union { + uint32_t cmp_par_3; + uint32_t ap2_golomb_par; /**< Adaptive 2 spillover threshold for imagette data; HW only */ + uint32_t cmp_par_ncob; /**< Compression parameter for normal center of brightness compression */ + uint32_t cmp_par_offset_variance; /**< Compression parameter for auxiliary science variance compression */ + }; + union { + uint32_t spill_par_3; + uint32_t ap2_spill; /**< Adaptive 2 Golomb parameter; HW only */ + uint32_t spill_ncob; /**< Spillover threshold parameter for normal center of brightness compression */ + uint32_t spill_offset_variance; /**< Spillover threshold parameter for auxiliary science variance compression */ + }; + + union { + uint32_t cmp_par_4; + uint32_t cmp_par_efx; /**< Compression parameter for extended flux compression */ + uint32_t cmp_par_background_mean; /**< Compression parameter for auxiliary science mean compression */ + uint32_t cmp_par_smearing_mean; /**< Compression parameter for auxiliary science mean compression */ + }; + union { + uint32_t spill_par_4; + uint32_t spill_efx; /**< Spillover threshold parameter for extended flux compression */ + uint32_t spill_background_mean; /**< Spillover threshold parameter for auxiliary science mean compression */ + uint32_t spill_smearing_mean; /**< Spillover threshold parameter for auxiliary science mean compression */ + }; + + union { + uint32_t cmp_par_5; + uint32_t cmp_par_ecob; /**< Compression parameter for executed center of brightness compression */ + uint32_t cmp_par_background_variance; /**< Compression parameter for auxiliary science variance compression */ + uint32_t cmp_par_smearing_variance; /**< Compression parameter for auxiliary science variance compression */ + }; + union { + uint32_t spill_par_5; + uint32_t spill_ecob; /**< Spillover threshold parameter for executed center of brightness compression */ + uint32_t spill_background_variance; /**< Spillover threshold parameter for auxiliary science variance compression */ + uint32_t spill_smearing_variance; /**< Spillover threshold parameter for auxiliary science variance compression */ + }; + + union { + uint32_t cmp_par_6; + uint32_t cmp_par_fx_cob_variance; /**< Compression parameter for flux/COB variance compression */ + uint32_t cmp_par_background_pixels_error; /**< Compression parameter for auxiliary science outlier pixels number compression */ + uint32_t cmp_par_smearing_pixels_error; /**< Compression parameter for auxiliary science outlier pixels number compression */ + }; + union { + uint32_t spill_par_6; + uint32_t spill_fx_cob_variance; /**< Spillover threshold parameter for flux/COB variance compression */ + uint32_t spill_background_pixels_error; /**< Spillover threshold parameter for auxiliary science outlier pixels number compression */ + uint32_t spill_smearing_pixels_error; /**< Spillover threshold parameter for auxiliary science outlier pixels number compression */ + }; + const struct cmp_max_used_bits 
*max_used_bits; /**< the maximum length of the different data products types in bits */ }; diff --git a/lib/common/list.h b/lib/common/list.h index 41ca9577fe5f0f6a6c440ff2518d7b275e977f27..5613c64f0e6d53782407924f189d7c70e96dc6a0 100644 --- a/lib/common/list.h +++ b/lib/common/list.h @@ -115,7 +115,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) * @note add (void *) cast to suppress wcast-align warning */ #define list_entry(ptr, type, member) \ - ((type *)((void *)((char *)(ptr)-(unsigned long)(&((type *)0)->member)))) + ((type *)((void *)((char *)(ptr)-__builtin_offsetof(type, member)))) /** * list_first_entry - get the first element from a list @@ -168,7 +168,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) * @param member the name of the list_head within the struct. */ #define list_prev_entry(pos, member) \ - list_entry((pos)->member.prev, typeof(*(pos)), member) + list_entry((pos)->member.prev, __typeof__(*(pos)), member) /** * list_for_each - iterate over a list @@ -252,10 +252,10 @@ static inline void INIT_LIST_HEAD(struct list_head *list) */ #define list_entry_do_while(pos, head, member, type, _CODE_) \ - list_entry_do(pos,head,member,type) \ + list_entry_do(pos, head, member, type) \ { \ _CODE_; \ - } list_entry_while(pos,head,member,type) + } list_entry_while(pos, head, member, type) /** * @brief reverse iterate over list of given type @@ -268,7 +268,7 @@ static inline void INIT_LIST_HEAD(struct list_head *list) #define list_for_each_entry_rev(pos, head, member) \ for (pos = list_entry((head)->prev, __typeof__(*pos), member); \ &pos->member != (head); \ - pos = list_entry(pos->member.prev, __typeof(*pos), member)) + pos = list_entry(pos->member.prev, __typeof__(*pos), member)) /* @@ -414,8 +414,8 @@ static inline int list_filled(struct list_head *head) static inline void list_move_tail(struct list_head *list, struct list_head *head) { - __list_del(list->prev, list->next); - list_add_tail(list, head); + __list_del(list->prev, list->next); + list_add_tail(list, head); } @@ -426,12 +426,12 @@ static inline void list_move_tail(struct list_head *list, static inline void list_rotate_left(struct list_head *head) { - struct list_head *first; + struct list_head *first; - if (!list_empty(head)) { - first = head->next; - list_move_tail(first, head); - } + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } } diff --git a/lib/decompress/decmp.c b/lib/decompress/decmp.c index e4505a396f3817b6a1b1634d1803f30d3d5e84e7..b233bfe6bfd8cc75edd5d208d79a94ed0102b075 100644 --- a/lib/decompress/decmp.c +++ b/lib/decompress/decmp.c @@ -126,7 +126,7 @@ static uint32_t rice_decoder(struct bit_decoder *dec, uint32_t m, uint32_t log2_ * @brief decode the next Golomb code word in the bitstream * * @param dec a pointer to a bit_decoder context - * @param m Golomb parameter (have to be bigger than 0) + * @param m Golomb parameter (has to be bigger than 0) * @param log2_m is ilog_2(m) calculate outside function for better * performance * @@ -169,7 +169,7 @@ static uint32_t golomb_decoder(struct bit_decoder *dec, uint32_t m, uint32_t log /** * @brief select the decoder based on the used Golomb parameter * - * @param golomb_par Golomb parameter, have to be bigger than 0 + * @param golomb_par Golomb parameter, has to be bigger than 0 * * @note if the Golomb parameter is a power of 2 we can use the faster Rice decoder * @note if the Golomb parameter is 1 we can use the even faster unary decoder @@ -400,7 +400,7 @@ static void configure_decoder_setup(struct 
decoder_setup *setup, struct bit_deco * @returns 0 on success; otherwise error */ -static int decompress_imagette(struct cmp_cfg *cfg, struct bit_decoder *dec) +static int decompress_imagette(const struct cmp_cfg *cfg, struct bit_decoder *dec) { size_t i; int err; @@ -530,7 +530,7 @@ static int decompress_s_fx(const struct cmp_cfg *cfg, struct bit_decoder *dec) decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -598,7 +598,7 @@ static int decompress_s_fx_efx(const struct cmp_cfg *cfg, struct bit_decoder *de decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -675,7 +675,7 @@ static int decompress_s_fx_ncob(const struct cmp_cfg *cfg, struct bit_decoder *d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -759,7 +759,7 @@ static int decompress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, struct bit_d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -868,7 +868,7 @@ static int decompress_f_fx(const struct cmp_cfg *cfg, struct bit_decoder *dec) decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -926,7 +926,7 @@ static int decompress_f_fx_efx(const struct cmp_cfg *cfg, struct bit_decoder *de decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -994,7 +994,7 @@ static int decompress_f_fx_ncob(const struct cmp_cfg *cfg, struct bit_decoder *d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, 
cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1069,7 +1069,7 @@ static int decompress_f_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, struct bit_d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1169,7 +1169,7 @@ static int decompress_l_fx(const struct cmp_cfg *cfg, struct bit_decoder *dec) decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1246,7 +1246,7 @@ static int decompress_l_fx_efx(const struct cmp_cfg *cfg, struct bit_decoder *de decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1333,7 +1333,7 @@ static int decompress_l_fx_ncob(const struct cmp_cfg *cfg, struct bit_decoder *d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1443,7 +1443,7 @@ static int decompress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, struct bit_d decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1577,7 +1577,7 @@ static int decompress_offset(const struct cmp_cfg *cfg, struct bit_decoder *dec) decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1601,10 +1601,10 @@ static int decompress_offset(const struct cmp_cfg *cfg, struct bit_decoder *dec) variance_bits_used = cfg->max_used_bits->nc_offset_variance; break; } - configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_mean, cfg->spill_mean, + configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, 
cfg->cmp_par_offset_mean, cfg->spill_offset_mean, cfg->round, mean_bits_used); - configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_variance, cfg->spill_variance, + configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_offset_variance, cfg->spill_offset_variance, cfg->round, variance_bits_used); } @@ -1662,7 +1662,7 @@ static int decompress_background(const struct cmp_cfg *cfg, struct bit_decoder * decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1688,13 +1688,13 @@ static int decompress_background(const struct cmp_cfg *cfg, struct bit_decoder * break; } - configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_mean, cfg->spill_mean, + configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_background_mean, cfg->spill_background_mean, cfg->round, mean_used_bits); - configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_variance, cfg->spill_variance, + configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_background_variance, cfg->spill_background_variance, cfg->round, variance_used_bits); - configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_pixels_error, cfg->spill_pixels_error, + configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error, cfg->round, outlier_pixels_used_bits); } @@ -1759,7 +1759,7 @@ static int decompress_smearing(const struct cmp_cfg *cfg, struct bit_decoder *de decompress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, (void **)&up_model_buf, cfg); - bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+MULTI_ENTRY_HDR_SIZE, cfg->buffer_length-MULTI_ENTRY_HDR_SIZE); + bit_init_decoder(dec, (uint8_t *)cfg->icu_output_buf+COLLECTION_HDR_SIZE, cfg->buffer_length-COLLECTION_HDR_SIZE); if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; @@ -1769,11 +1769,11 @@ static int decompress_smearing(const struct cmp_cfg *cfg, struct bit_decoder *de next_model_p = data_buf; } - configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_mean, cfg->spill_mean, + configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean, cfg->round, cfg->max_used_bits->smearing_mean); - configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_variance, cfg->spill_variance, + configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance, cfg->round, cfg->max_used_bits->smearing_variance_mean); - configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_pixels_error, cfg->spill_pixels_error, + configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error, cfg->round, cfg->max_used_bits->smearing_outlier_pixels); for (i = 0; ; i++) { @@ -1820,7 +1820,7 @@ static int decompress_smearing(const struct cmp_cfg *cfg, struct bit_decoder *de * @returns the size of the decompressed data on success; returns negative on failure */ -static int decompressed_data_internal(struct cmp_cfg *cfg) +static int decompressed_data_internal(const struct cmp_cfg *cfg) { int err; uint32_t 
data_size; @@ -1945,7 +1945,7 @@ static int decompressed_data_internal(struct cmp_cfg *cfg) debug_print("Error: Data consistency check failed. %s", please_check_str); break; case BIT_END_OF_BUFFER: - /* check if non consumed bit are zero */ + /* check if non consumed bits are zero */ if (bit_read_bits(&dec, sizeof(dec.bit_container)*8 - dec.bits_consumed) == 0) break; /* fall through */ @@ -1973,8 +1973,6 @@ static int decompressed_data_internal(struct cmp_cfg *cfg) static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) { - int32_t samples; - if (!cfg) return -1; @@ -1993,14 +1991,23 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) cfg->round = cmp_ent_get_lossy_cmp_par(ent); cfg->buffer_length = cmp_ent_get_cmp_data_size(ent); - samples = cmp_input_size_to_samples(cmp_ent_get_original_size(ent), cfg->data_type); - if (samples < 0) { - debug_print("Error: original_size and data product type in the compression header are not compatible.\n"); + if (cfg->data_type == DATA_TYPE_CHUNK) { cfg->samples = 0; - return -1; - } + if (cfg->buffer_length < (COLLECTION_HDR_SIZE + CMP_COLLECTION_FILD_SIZE) || + (cfg->buffer_length < COLLECTION_HDR_SIZE && (cfg->cmp_mode == CMP_MODE_RAW))) { + debug_print("Error: The compressed data size in the compression header is smaller than a collection header.\n"); + return -1; + } + } else { + int32_t samples = cmp_input_size_to_samples(cmp_ent_get_original_size(ent), cfg->data_type); - cfg->samples = (uint32_t)samples; + if (samples < 0) { + debug_print("Error: original_size and data product type in the compression header are not compatible.\n"); + cfg->samples = 0; + return -1; + } + cfg->samples = (uint32_t)samples; + } cfg->icu_output_buf = cmp_ent_get_data_buf(ent); @@ -2010,9 +2017,14 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) return -1; } - if (cfg->cmp_mode == CMP_MODE_RAW) + if (cfg->cmp_mode == CMP_MODE_RAW) { + if (cmp_ent_get_original_size(ent) != cmp_ent_get_cmp_data_size(ent)) { + debug_print("Error: The compressed data size and the decompressed original data size in the compression header should be the same in raw mode.\n"); + return -1; + } /* no specific header is used for raw data we are done */ return 0; + } switch (cfg->data_type) { case DATA_TYPE_IMAGETTE_ADAPTIVE: @@ -2034,13 +2046,6 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) case DATA_TYPE_BACKGROUND: case DATA_TYPE_F_CAM_BACKGROUND: case DATA_TYPE_SMEARING: - cfg->cmp_par_mean = cmp_ent_get_non_ima_cmp_par1(ent); - cfg->spill_mean = cmp_ent_get_non_ima_spill1(ent); - cfg->cmp_par_variance = cmp_ent_get_non_ima_cmp_par2(ent); - cfg->spill_variance = cmp_ent_get_non_ima_spill2(ent); - cfg->cmp_par_pixels_error = cmp_ent_get_non_ima_cmp_par3(ent); - cfg->spill_pixels_error = cmp_ent_get_non_ima_spill3(ent); - break; case DATA_TYPE_S_FX: case DATA_TYPE_S_FX_EFX: case DATA_TYPE_S_FX_NCOB: @@ -2053,6 +2058,7 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) case DATA_TYPE_F_FX_EFX: case DATA_TYPE_F_FX_NCOB: case DATA_TYPE_F_FX_EFX_NCOB_ECOB: + case DATA_TYPE_CHUNK: cfg->cmp_par_exp_flags = cmp_ent_get_non_ima_cmp_par1(ent); cfg->spill_exp_flags = cmp_ent_get_non_ima_spill1(ent); cfg->cmp_par_fx = cmp_ent_get_non_ima_cmp_par2(ent); @@ -2077,15 +2083,146 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) } +/* TODO: doc string */ + +static uint8_t *get_next_cmp_collection(uint8_t *cmp_col, int raw_mode_flag) +{ + uint8_t 
*next_cmp_col = cmp_col; + if (raw_mode_flag) { + /* If all data is "compressed" in raw mode, the collection is + * simply copied into the bitstream, with the data in big-endian + * order. We get the size of the uncompressed data by reading + * the length of the data collection (= (un)compressed data)from + * the collection header + * |---------------------|- + * | COLLECTION HDR | + * | | 12 bytes + * |---------------------|- + * | uncompressed data | collection + * | *-*-* | data size + * |---------------------|- + * Fields not scaled correctly + */ + next_cmp_col += cmp_col_get_data_length((const struct collection_hdr *)cmp_col); + } else { + /* If a non-raw mode is used to compress all collections, a + * 2-byte big endian field with the size of the compressed data + * is prefixed (without the size of the file itself and without + * the size of the collection header). This is followed by a + * collection header, followed by the compressed data. + * |---------------------| - + * |compressed collection| + * | data size | 2 bytes + * |---------------------|- + * | COLLECTION HDR | + * | | 12 bytes + * |---------------------|- + * | compressed data | compressed collection + * | *-*-* | data size + * |---------------------|- + * Fields not scaled correctly + */ + uint16_t cmp_data_size; + + memcpy(&cmp_data_size, cmp_col, sizeof(cmp_data_size)); + be16_to_cpus(&cmp_data_size); + next_cmp_col += CMP_COLLECTION_FILD_SIZE + cmp_data_size; + } + return next_cmp_col + COLLECTION_HDR_SIZE; +} + + +/* TODO: doc string */ + +static int get_num_of_chunks(struct cmp_entity *ent) +{ + uint8_t *cmp_data_p = cmp_ent_get_data_buf(ent); + long cmp_data_size = cmp_ent_get_cmp_data_size(ent); + int all_data_uncmp = cmp_ent_get_data_type_raw_bit(ent); + int n = 0; + uint8_t *p = cmp_data_p; + /* highest plausible address of compressed collection */ + uint8_t *limit_ptr = cmp_data_p + cmp_data_size - COLLECTION_HDR_SIZE; + + if (!all_data_uncmp) + limit_ptr -= CMP_COLLECTION_FILD_SIZE; + + while (p < limit_ptr) { + p = get_next_cmp_collection(p, all_data_uncmp); + n++; + } + + if (p-cmp_data_p != cmp_data_size) { + debug_print("Error: The sum of the compressed collection does not match the size of the data in the compression header.\n"); + return -1; + } + return n; +} + + +/* TODO: doc string */ + +static long parse_cmp_collection(uint8_t *cmp_col, int n, int raw_mode_flag, struct cmp_cfg *cfg) +{ + int i; + long decmp_pos = 0; /* position where to put the uncompressed result */ + /* offset between the compressed collection and the collection header */ + uint16_t const col_offset = raw_mode_flag ? 
0 : CMP_COLLECTION_FILD_SIZE; + /* pointer to the collection header */ + const struct collection_hdr *col_hdr = (const struct collection_hdr *)(cmp_col + col_offset); + uint32_t cmp_data_size; /* size of the compressed data in the collection (not including the header) */ + uint16_t original_col_size; /* size of the decompressed collection data (not including the header) */ + size_t sample_size; + + /* get to the collection we want to decompress */ + for (i = 0; i < n; i++) { + decmp_pos += cmp_col_get_size(col_hdr); + cmp_col = get_next_cmp_collection(cmp_col, raw_mode_flag); + col_hdr = (const struct collection_hdr *)(cmp_col + col_offset); + } + + cmp_data_size = (uint32_t)(get_next_cmp_collection(cmp_col, raw_mode_flag) + - cmp_col - col_offset - COLLECTION_HDR_SIZE); + original_col_size = cmp_col_get_data_length(col_hdr); + + if (cmp_data_size > original_col_size) { + debug_print("Error: Collection %i, the size of the compressed collection is larger than that of the uncompressed collection.\n", i); + return -1; + } + + /* if the compressed data size == original_col_size the collection data + * was put uncompressed into the bitstream */ + if ((cmp_data_size == original_col_size) && !raw_mode_flag) + cfg->cmp_mode = CMP_MODE_RAW; + + cfg->icu_output_buf = (void *)(cmp_col + col_offset); /* unaligned cast -> reading compressed data as uint8_t * */ + cfg->buffer_length = cmp_data_size + COLLECTION_HDR_SIZE; + + cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr)); + sample_size = size_of_a_sample(cfg->data_type); + if (!sample_size) + return -1; + + if (original_col_size % sample_size) { + debug_print("Error: The size of the collection is not a multiple of a collection entry.\n"); + return -1; + } + cfg->samples = original_col_size / sample_size; + + return decmp_pos; +} + + /** * @brief decompress a compression entity * + * @note this function assumes that the entity size in the ent header is correct * @param ent pointer to the compression entity to be decompressed * @param model_of_data pointer to model data buffer (can be NULL if no * model compression mode is used) * @param up_model_buf pointer to store the updated model for the next model * mode compression (can be the same as the model_of_data - * buffer for an in-place update or NULL if updated model is not needed) + * buffer for an in-place update or NULL if the updated model is not needed) * @param decompressed_data pointer to the decompressed data buffer (can be NULL) * * @returns the size of the decompressed data on success; returns negative on failure @@ -2094,22 +2231,56 @@ static int cmp_ent_read_header(struct cmp_entity *ent, struct cmp_cfg *cfg) int decompress_cmp_entiy(struct cmp_entity *ent, void *model_of_data, void *up_model_buf, void *decompressed_data) { - int err; struct cmp_cfg cfg; + int decmp_size; + int raw_mode_flag; + int i, n_chunks; memset(&cfg, 0, sizeof(struct cmp_cfg)); - cfg.model_buf = model_of_data; - cfg.icu_new_model_buf = up_model_buf; - cfg.input_buf = decompressed_data; if (!ent) return -1; - err = cmp_ent_read_header(ent, &cfg); - if (err) + decmp_size = (int)cmp_ent_get_original_size(ent); + if (!decompressed_data) + return decmp_size; + + if (cmp_ent_read_header(ent, &cfg)) return -1; - return decompressed_data_internal(&cfg); + if (cfg.data_type != DATA_TYPE_CHUNK) { + /* perform a non-chunk decompression */ + cfg.model_buf = model_of_data; + cfg.icu_new_model_buf = up_model_buf; + cfg.input_buf = decompressed_data; + return decompressed_data_internal(&cfg); + } 
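+
+	/*
+	 * Sketch of the chunk path below (all names as introduced in this
+	 * patch): the compressed data of a chunk entity is a sequence of
+	 * compressed collections. get_num_of_chunks() counts the collections
+	 * in the entity; for each one parse_cmp_collection() rebuilds a
+	 * per-collection cmp_cfg and returns the byte offset of its
+	 * decompressed output, so every collection is handed to
+	 * decompressed_data_internal() on its own and written to
+	 * decompressed_data + offset (the model buffer is shifted by the
+	 * same offset).
+	 */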
+ + /* perform a chunk decompression */ + n_chunks = get_num_of_chunks(ent); + if (n_chunks <= 0) + return -1; + + raw_mode_flag = (cfg.cmp_mode == CMP_MODE_RAW); + + for (i = 0; i < n_chunks; i++) { + int decmp_chunk_size; + struct cmp_cfg cmp_cpy = cfg; + long offset = parse_cmp_collection(cmp_ent_get_data_buf(ent), i, + raw_mode_flag, &cmp_cpy); + if (offset < 0) + return -1; + + cmp_cpy.input_buf = (uint8_t *)decompressed_data + offset; + if (cmp_cpy.model_buf) + cmp_cpy.model_buf = (uint8_t *)model_of_data + offset; + if (cmp_cpy.icu_new_model_buf) + cmp_cpy.model_buf = (uint8_t *)up_model_buf + offset; + decmp_chunk_size = decompressed_data_internal(&cmp_cpy); + if (decmp_chunk_size < 0) + return decmp_chunk_size; + } + return decmp_size; } @@ -2119,12 +2290,13 @@ int decompress_cmp_entiy(struct cmp_entity *ent, void *model_of_data, * @param compressed_data pointer to the RDCU compressed data (without a * compression entity header) * @param info pointer to a decompression information structure - * consisting the metadata of the compression + * consisting of the metadata of the compression * @param model_of_data pointer to model data buffer (can be NULL if no * model compression mode is used) * @param up_model_buf pointer to store the updated model for the next model * mode compression (can be the same as the model_of_data - * buffer for in-place update or NULL if updated model is not needed) + * buffer for an in-place update or NULL if the + * updated model is not needed) * @param decompressed_data pointer to the decompressed data buffer (can be NULL) * * @returns the size of the decompressed data on success; returns negative on failure diff --git a/lib/icu_compress/cmp_icu.c b/lib/icu_compress/cmp_icu.c index 48a7f98f1978a0309a2378493f294a8e646d1223..42233cd5126213ae2111d9131f6f9c00c205464f 100644 --- a/lib/icu_compress/cmp_icu.c +++ b/lib/icu_compress/cmp_icu.c @@ -39,6 +39,35 @@ #include "../common/cmp_entity.h" #include "../cmp_icu.h" +#include "../cmp_chunk.h" + + +/** + * @brief default implementation of the get_timestamp() function + * + * @returns 0 + */ + +static uint64_t default_get_timestamp(void) +{ + return 0; +} + + +/** + * @brief function pointer to a function returning a current PLATO timestamp + * initialised with the compress_chunk_init() function + */ + +static uint64_t (*get_timestamp)(void) = default_get_timestamp; + + +/** + * @brief holding the version_identifier for the compression header + * initialised with the compress_chunk_init() function + */ + +static uint32_t version_identifier; /** @@ -67,6 +96,20 @@ struct encoder_setupt { }; +/* TODO: doc string */ + +enum chunk_type { + CHUNK_TYPE_UNKNOWN, + CHUNK_TYPE_NCAM_IMGAETTE, + CHUNK_TYPE_SHORT_CADENCE, + CHUNK_TYPE_LONG_CADENCE, + CHUNK_TYPE_SAT_IMGAETTE, + CHUNK_TYPE_OFFSET_BACKGROUND, /* N-CAM */ + CHUNK_TYPE_SMEARING, + CHUNK_TYPE_F_CHAIN, +}; + + /** * @brief create an ICU compression configuration * @@ -92,7 +135,7 @@ struct cmp_cfg cmp_cfg_icu_create(enum cmp_data_type data_type, enum cmp_mode cm cfg.round = lossy_par; cfg.max_used_bits = &MAX_USED_BITS_SAFE; - if (cmp_cfg_gen_par_is_invalid(&cfg, ICU_CHECK)) + if (cmp_cfg_gen_par_is_invalid(&cfg, ICU_CHECK) || data_type == DATA_TYPE_CHUNK) cfg.data_type = DATA_TYPE_UNKNOWN; return cfg; @@ -152,6 +195,7 @@ uint32_t cmp_cfg_icu_buffers(struct cmp_cfg *cfg, void *data_to_compress, cfg->samples = data_samples; cfg->icu_new_model_buf = updated_model; cfg->icu_output_buf = compressed_data; + /* cfg->buffer_length = cmp_data_size; */ cfg->buffer_length = 
compressed_data_len_samples; if (cmp_cfg_icu_buffers_is_invalid(cfg)) @@ -305,13 +349,35 @@ int cmp_cfg_aux(struct cmp_cfg *cfg, if (!cfg) return -1; - cfg->cmp_par_mean = cmp_par_mean; - cfg->cmp_par_variance = cmp_par_variance; - cfg->cmp_par_pixels_error = cmp_par_pixels_error; - - cfg->spill_mean = spillover_mean; - cfg->spill_variance = spillover_variance; - cfg->spill_pixels_error = spillover_pixels_error; + switch (cfg->data_type) { + case DATA_TYPE_OFFSET: + case DATA_TYPE_F_CAM_OFFSET: + cfg->cmp_par_offset_mean = cmp_par_mean; + cfg->spill_offset_mean = spillover_mean; + cfg->cmp_par_offset_variance = cmp_par_variance; + cfg->spill_offset_variance = spillover_variance; + break; + case DATA_TYPE_BACKGROUND: + case DATA_TYPE_F_CAM_BACKGROUND: + cfg->cmp_par_background_mean = cmp_par_mean; + cfg->spill_background_mean = spillover_mean; + cfg->cmp_par_background_variance = cmp_par_variance; + cfg->spill_background_variance = spillover_variance; + cfg->cmp_par_background_pixels_error = cmp_par_pixels_error; + cfg->spill_background_pixels_error = spillover_pixels_error; + break; + case DATA_TYPE_SMEARING: + cfg->cmp_par_smearing_mean = cmp_par_mean; + cfg->spill_smearing_mean = spillover_mean; + cfg->cmp_par_smearing_variance = cmp_par_variance; + cfg->spill_smearing_variance = spillover_variance; + cfg->cmp_par_smearing_pixels_error = cmp_par_pixels_error; + cfg->spill_smearing_pixels_error = spillover_pixels_error; + break; + default: + debug_print("Error: The compression data type is not an auxiliary science compression data type.\n"); + return -1; + } if (cmp_cfg_aux_is_invalid(cfg)) return -1; @@ -459,9 +525,8 @@ static uint32_t rice_encoder(uint32_t value, uint32_t m, uint32_t log2_m, /* * NOTE: If log2_m = 31 -> rl = 32, (q << rl) leads to an undefined * behavior. However, in this case, a valid code with a maximum of 32 - * bits can only be formed if q = 0 and qc = 0. Any shift with 0 << x always - * results in 0, which forms the correct codeword in this case. To prevent - * undefined behavior, the right shift operand is masked (& 0x1FU) + * bits can only be formed if q = 0 and qc = 0. 
To prevent undefined b + * ehavior, the right shift operand is masked (& 0x1FU) */ return rl + q; /* calculate the length of the code word */ @@ -710,15 +775,14 @@ static int encode_value(uint32_t data, uint32_t model, int stream_len, * cmp_buffer in uint32_t words * * @param buffer_length length of the icu_output_buf in samples - * @param data_type used compression data type * * @returns buffer size in bits * */ -static uint32_t cmp_buffer_length_to_bits(uint32_t buffer_length, enum cmp_data_type data_type) +static uint32_t cmp_buffer_length_to_bits(uint32_t buffer_length) { - return (cmp_cal_size_of_data(buffer_length, data_type) & ~0x3U) * CHAR_BIT; + return (buffer_length & ~0x3U) * 8; } @@ -746,7 +810,7 @@ static void configure_encoder_setup(struct encoder_setupt *setup, setup->max_data_bits = max_data_bits; setup->lossy_par = lossy_par; setup->bitstream_adr = cfg->icu_output_buf; - setup->max_stream_len = cmp_buffer_length_to_bits(cfg->buffer_length, cfg->data_type); + setup->max_stream_len = cmp_buffer_length_to_bits(cfg->buffer_length); if (cfg->cmp_mode != CMP_MODE_STUFF) { setup->encoder_par2 = ilog_2(cmp_par); @@ -792,9 +856,8 @@ static void configure_encoder_setup(struct encoder_setupt *setup, * bigger than the max_used_bits parameter allows */ -static int compress_imagette(const struct cmp_cfg *cfg) +static int compress_imagette(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct encoder_setupt setup; uint32_t max_data_bits; @@ -847,43 +910,6 @@ static int compress_imagette(const struct cmp_cfg *cfg) } -/** - * @brief compress the multi-entry packet header structure and sets the data, - * model and up_model pointers to the data after the header - * - * @param data pointer to a pointer pointing to the data to be compressed - * @param model pointer to a pointer pointing to the model of the data - * @param up_model pointer to a pointer pointing to the updated model buffer - * @param compressed_data pointer to the compressed data buffer - * - * @returns the bit length of the bitstream on success; negative on error, - * - * @note the (void **) cast relies on all pointer types having the same internal - * representation which is common, but not universal; http://www.c-faq.com/ptrs/genericpp.html - */ - -static int compress_multi_entry_hdr(void **data, void **model, void **up_model, - void *compressed_data) -{ - if (*up_model) { - if (*data) - memcpy(*up_model, *data, COLLECTION_HDR_SIZE); - *up_model = (uint8_t *)*up_model + COLLECTION_HDR_SIZE; - } - - if (*data) { - if (compressed_data) - memcpy(compressed_data, *data, COLLECTION_HDR_SIZE); - *data = (uint8_t *)*data + COLLECTION_HDR_SIZE; - } - - if (*model) - *model = (uint8_t *)*model + COLLECTION_HDR_SIZE; - - return COLLECTION_HDR_SIZE * CHAR_BIT; -} - - /** * @brief compress short normal light flux (S_FX) data * @@ -894,9 +920,8 @@ static int compress_multi_entry_hdr(void **data, void **model, void **up_model, * value in the bitstream */ -static int compress_s_fx(const struct cmp_cfg *cfg) +static int compress_s_fx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct s_fx *data_buf = cfg->input_buf; @@ -906,15 +931,10 @@ static int compress_s_fx(const struct cmp_cfg *cfg) struct s_fx model; struct encoder_setupt setup_exp_flag, setup_fx; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if 
(model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -961,9 +981,8 @@ static int compress_s_fx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_s_fx_efx(const struct cmp_cfg *cfg) +static int compress_s_fx_efx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct s_fx_efx *data_buf = cfg->input_buf; @@ -973,15 +992,10 @@ static int compress_s_fx_efx(const struct cmp_cfg *cfg) struct s_fx_efx model; struct encoder_setupt setup_exp_flag, setup_fx, setup_efx; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1036,9 +1050,8 @@ static int compress_s_fx_efx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_s_fx_ncob(const struct cmp_cfg *cfg) +static int compress_s_fx_ncob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct s_fx_ncob *data_buf = cfg->input_buf; @@ -1048,15 +1061,10 @@ static int compress_s_fx_ncob(const struct cmp_cfg *cfg) struct s_fx_ncob model; struct encoder_setupt setup_exp_flag, setup_fx, setup_ncob; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1117,9 +1125,8 @@ static int compress_s_fx_ncob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) +static int compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct s_fx_efx_ncob_ecob *data_buf = cfg->input_buf; @@ -1130,15 +1137,10 @@ static int compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) struct encoder_setupt setup_exp_flag, setup_fx, setup_ncob, setup_efx, setup_ecob; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1221,9 +1223,8 @@ static int compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_f_fx(const struct cmp_cfg *cfg) +static int compress_f_fx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct f_fx *data_buf = cfg->input_buf; @@ -1233,15 +1234,10 @@ static int compress_f_fx(const struct cmp_cfg *cfg) struct f_fx model; struct encoder_setupt setup_fx; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, 
cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1280,9 +1276,8 @@ static int compress_f_fx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_f_fx_efx(const struct cmp_cfg *cfg) +static int compress_f_fx_efx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct f_fx_efx *data_buf = cfg->input_buf; @@ -1292,15 +1287,10 @@ static int compress_f_fx_efx(const struct cmp_cfg *cfg) struct f_fx_efx model; struct encoder_setupt setup_fx, setup_efx; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1347,9 +1337,8 @@ static int compress_f_fx_efx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_f_fx_ncob(const struct cmp_cfg *cfg) +static int compress_f_fx_ncob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct f_fx_ncob *data_buf = cfg->input_buf; @@ -1359,15 +1348,10 @@ static int compress_f_fx_ncob(const struct cmp_cfg *cfg) struct f_fx_ncob model; struct encoder_setupt setup_fx, setup_ncob; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1420,9 +1404,8 @@ static int compress_f_fx_ncob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_f_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) +static int compress_f_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct f_fx_efx_ncob_ecob *data_buf = cfg->input_buf; @@ -1432,15 +1415,10 @@ static int compress_f_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) struct f_fx_efx_ncob_ecob model; struct encoder_setupt setup_fx, setup_ncob, setup_efx, setup_ecob; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1515,9 +1493,8 @@ static int compress_f_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_l_fx(const struct cmp_cfg *cfg) +static int compress_l_fx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct l_fx *data_buf = cfg->input_buf; @@ -1527,15 +1504,10 @@ static int compress_l_fx(const struct cmp_cfg *cfg) struct l_fx model; struct encoder_setupt setup_exp_flag, setup_fx, setup_fx_var; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void 
**)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1590,9 +1562,8 @@ static int compress_l_fx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_l_fx_efx(const struct cmp_cfg *cfg) +static int compress_l_fx_efx(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct l_fx_efx *data_buf = cfg->input_buf; @@ -1602,15 +1573,10 @@ static int compress_l_fx_efx(const struct cmp_cfg *cfg) struct l_fx_efx model; struct encoder_setupt setup_exp_flag, setup_fx, setup_efx, setup_fx_var; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1673,9 +1639,8 @@ static int compress_l_fx_efx(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_l_fx_ncob(const struct cmp_cfg *cfg) +static int compress_l_fx_ncob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct l_fx_ncob *data_buf = cfg->input_buf; @@ -1686,15 +1651,10 @@ static int compress_l_fx_ncob(const struct cmp_cfg *cfg) struct encoder_setupt setup_exp_flag, setup_fx, setup_ncob, setup_fx_var, setup_cob_var; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1778,9 +1738,8 @@ static int compress_l_fx_ncob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) +static int compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct l_fx_efx_ncob_ecob *data_buf = cfg->input_buf; @@ -1791,15 +1750,10 @@ static int compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) struct encoder_setupt setup_exp_flag, setup_fx, setup_ncob, setup_efx, setup_ecob, setup_fx_var, setup_cob_var; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1904,9 +1858,8 @@ static int compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_offset(const struct cmp_cfg *cfg) +static int compress_offset(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct offset *data_buf = cfg->input_buf; @@ -1916,15 +1869,10 @@ static int compress_offset(const struct cmp_cfg *cfg) struct offset model; struct encoder_setupt setup_mean, setup_var; - if (model_mode_is_used(cfg->cmp_mode)) - 
up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -1944,9 +1892,9 @@ static int compress_offset(const struct cmp_cfg *cfg) variance_bits_used = cfg->max_used_bits->nc_offset_variance; break; } - configure_encoder_setup(&setup_mean, cfg->cmp_par_mean, cfg->spill_mean, + configure_encoder_setup(&setup_mean, cfg->cmp_par_offset_mean, cfg->spill_offset_mean, cfg->round, mean_bits_used, cfg); - configure_encoder_setup(&setup_var, cfg->cmp_par_variance, cfg->spill_variance, + configure_encoder_setup(&setup_var, cfg->cmp_par_offset_variance, cfg->spill_offset_variance, cfg->round, variance_bits_used, cfg); } @@ -1985,9 +1933,8 @@ static int compress_offset(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_background(const struct cmp_cfg *cfg) +static int compress_background(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct background *data_buf = cfg->input_buf; @@ -1997,15 +1944,10 @@ static int compress_background(const struct cmp_cfg *cfg) struct background model; struct encoder_setupt setup_mean, setup_var, setup_pix; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); next_model_p = data_buf; @@ -2027,11 +1969,11 @@ static int compress_background(const struct cmp_cfg *cfg) pixels_error_used_bits = cfg->max_used_bits->nc_background_outlier_pixels; break; } - configure_encoder_setup(&setup_mean, cfg->cmp_par_mean, cfg->spill_mean, + configure_encoder_setup(&setup_mean, cfg->cmp_par_background_mean, cfg->spill_background_mean, cfg->round, mean_used_bits, cfg); - configure_encoder_setup(&setup_var, cfg->cmp_par_variance, cfg->spill_variance, + configure_encoder_setup(&setup_var, cfg->cmp_par_background_variance, cfg->spill_background_variance, cfg->round, varinace_used_bits, cfg); - configure_encoder_setup(&setup_pix, cfg->cmp_par_pixels_error, cfg->spill_pixels_error, + configure_encoder_setup(&setup_pix, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error, cfg->round, pixels_error_used_bits, cfg); } @@ -2077,9 +2019,8 @@ static int compress_background(const struct cmp_cfg *cfg) * value in the bitstream */ -static int compress_smearing(const struct cmp_cfg *cfg) +static int compress_smearing(const struct cmp_cfg *cfg, int stream_len) { - int stream_len = 0; size_t i; struct smearing *data_buf = cfg->input_buf; @@ -2089,25 +2030,20 @@ static int compress_smearing(const struct cmp_cfg *cfg) struct smearing model; struct encoder_setupt setup_mean, setup_var_mean, setup_pix; - if (model_mode_is_used(cfg->cmp_mode)) - up_model_buf = cfg->icu_new_model_buf; - - stream_len = compress_multi_entry_hdr((void **)&data_buf, (void **)&model_buf, - (void **)&up_model_buf, cfg->icu_output_buf); - if (model_mode_is_used(cfg->cmp_mode)) { model = model_buf[0]; next_model_p = &model_buf[1]; + up_model_buf = cfg->icu_new_model_buf; } else { memset(&model, 0, sizeof(model)); 
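+		/* without a model the encoder starts from a zeroed reference;
+		 * next_model_p then walks the data buffer, so each entry is
+		 * coded against the previous data value */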
next_model_p = data_buf; } - configure_encoder_setup(&setup_mean, cfg->cmp_par_mean, cfg->spill_mean, + configure_encoder_setup(&setup_mean, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean, cfg->round, cfg->max_used_bits->smearing_mean, cfg); - configure_encoder_setup(&setup_var_mean, cfg->cmp_par_variance, cfg->spill_variance, + configure_encoder_setup(&setup_var_mean, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance, cfg->round, cfg->max_used_bits->smearing_variance_mean, cfg); - configure_encoder_setup(&setup_pix, cfg->cmp_par_pixels_error, cfg->spill_pixels_error, + configure_encoder_setup(&setup_pix, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error, cfg->round, cfg->max_used_bits->smearing_outlier_pixels, cfg); for (i = 0;; i++) { @@ -2168,7 +2104,7 @@ static int pad_bitstream(const struct cmp_cfg *cfg, int cmp_size) return cmp_size; /* maximum length of the bitstream/icu_output_buf in bits */ - output_buf_len_bits = cmp_buffer_length_to_bits(cfg->buffer_length, cfg->data_type); + output_buf_len_bits = cmp_buffer_length_to_bits(cfg->buffer_length); n_pad_bits = 32 - ((unsigned int)cmp_size & 0x1FU); if (n_pad_bits < 32) { @@ -2182,48 +2118,50 @@ static int pad_bitstream(const struct cmp_cfg *cfg, int cmp_size) } -/** - * @brief compress data on the ICU in software - * - * @param cfg pointer to a compression configuration (created with the - * cmp_cfg_icu_create() function, setup with the cmp_cfg_xxx() functions) - * - * @note the validity of the cfg structure is checked before the compression is - * started - * - * @returns the bit length of the bitstream on success; negative on error, - * CMP_ERROR_SMALL_BUF (-2) if the compressed data buffer is too small to - * hold the whole compressed data, CMP_ERROR_HIGH_VALUE (-3) if a data or - * model value is bigger than the max_used_bits parameter allows (set with - * the cmp_set_max_used_bits() function) - */ +/* TODO: doc string */ -int icu_compress_data(const struct cmp_cfg *cfg) +static int compress_data_internal(const struct cmp_cfg *cfg, int stream_len) { int bitsize = 0; if (!cfg) return -1; + if (stream_len < 0) + return stream_len; + if (cfg->samples == 0) /* nothing to compress we are done*/ return 0; - if (raw_mode_is_used(cfg->cmp_mode)) - if (cfg->samples > cfg->buffer_length) + if (stream_len & 0x7) { + printf("Error: The stream_len parameter must be a multiple of 8.\n"); + return -1; + } + + if (raw_mode_is_used(cfg->cmp_mode) && cfg->icu_output_buf) + if (((uint32_t)stream_len >> 3) + cfg->samples * size_of_a_sample(cfg->data_type) > cfg->buffer_length) return CMP_ERROR_SMALL_BUF; if (cmp_cfg_icu_is_invalid(cfg)) return -1; if (raw_mode_is_used(cfg->cmp_mode)) { - uint32_t raw_size = cmp_cal_size_of_data(cfg->samples, cfg->data_type); + uint32_t raw_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type); if (cfg->icu_output_buf) { - memcpy(cfg->icu_output_buf, cfg->input_buf, raw_size); - if (cmp_input_big_to_cpu_endianness(cfg->icu_output_buf, raw_size, cfg->data_type)) + uint32_t todo = raw_size; + uint8_t *p = (uint8_t *)cfg->icu_output_buf + (stream_len >> 3); + + memcpy(p, cfg->input_buf, raw_size); + /* TODO: fix cmp_input_big_to_cpu_endianness */ + if (stream_len > 0) { + p -= COLLECTION_HDR_SIZE; + todo += COLLECTION_HDR_SIZE; + } + if (cmp_input_big_to_cpu_endianness(p, todo, cfg->data_type)) return -1; } - bitsize = (int)raw_size * CHAR_BIT; /* convert to bits */ + bitsize += stream_len + (int)raw_size*8; /* convert to bits */ } else { if (cfg->icu_output_buf && 
cfg->samples/3 > cfg->buffer_length) debug_print("Warning: The size of the compressed_data buffer is 3 times smaller than the data_to_compress. This is probably unintended.\n"); @@ -2235,58 +2173,58 @@ int icu_compress_data(const struct cmp_cfg *cfg) case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE: case DATA_TYPE_F_CAM_IMAGETTE: case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE: - bitsize = compress_imagette(cfg); + bitsize = compress_imagette(cfg, stream_len); break; case DATA_TYPE_S_FX: - bitsize = compress_s_fx(cfg); + bitsize = compress_s_fx(cfg, stream_len); break; case DATA_TYPE_S_FX_EFX: - bitsize = compress_s_fx_efx(cfg); + bitsize = compress_s_fx_efx(cfg, stream_len); break; case DATA_TYPE_S_FX_NCOB: - bitsize = compress_s_fx_ncob(cfg); + bitsize = compress_s_fx_ncob(cfg, stream_len); break; case DATA_TYPE_S_FX_EFX_NCOB_ECOB: - bitsize = compress_s_fx_efx_ncob_ecob(cfg); + bitsize = compress_s_fx_efx_ncob_ecob(cfg, stream_len); break; case DATA_TYPE_F_FX: - bitsize = compress_f_fx(cfg); + bitsize = compress_f_fx(cfg, stream_len); break; case DATA_TYPE_F_FX_EFX: - bitsize = compress_f_fx_efx(cfg); + bitsize = compress_f_fx_efx(cfg, stream_len); break; case DATA_TYPE_F_FX_NCOB: - bitsize = compress_f_fx_ncob(cfg); + bitsize = compress_f_fx_ncob(cfg, stream_len); break; case DATA_TYPE_F_FX_EFX_NCOB_ECOB: - bitsize = compress_f_fx_efx_ncob_ecob(cfg); + bitsize = compress_f_fx_efx_ncob_ecob(cfg, stream_len); break; case DATA_TYPE_L_FX: - bitsize = compress_l_fx(cfg); + bitsize = compress_l_fx(cfg, stream_len); break; case DATA_TYPE_L_FX_EFX: - bitsize = compress_l_fx_efx(cfg); + bitsize = compress_l_fx_efx(cfg, stream_len); break; case DATA_TYPE_L_FX_NCOB: - bitsize = compress_l_fx_ncob(cfg); + bitsize = compress_l_fx_ncob(cfg, stream_len); break; case DATA_TYPE_L_FX_EFX_NCOB_ECOB: - bitsize = compress_l_fx_efx_ncob_ecob(cfg); + bitsize = compress_l_fx_efx_ncob_ecob(cfg, stream_len); break; case DATA_TYPE_OFFSET: case DATA_TYPE_F_CAM_OFFSET: - bitsize = compress_offset(cfg); + bitsize = compress_offset(cfg, stream_len); break; case DATA_TYPE_BACKGROUND: case DATA_TYPE_F_CAM_BACKGROUND: - bitsize = compress_background(cfg); + bitsize = compress_background(cfg, stream_len); break; case DATA_TYPE_SMEARING: - bitsize = compress_smearing(cfg); + bitsize = compress_smearing(cfg, stream_len); break; /* LCOV_EXCL_START */ @@ -2302,3 +2240,532 @@ int icu_compress_data(const struct cmp_cfg *cfg) return bitsize; } + + +/** + * @brief compress data on the ICU in software + * + * @param cfg pointer to a compression configuration (created with the + * cmp_cfg_icu_create() function, setup with the cmp_cfg_xxx() functions) + * + * @note the validity of the cfg structure is checked before the compression is + * started + * + * @returns the bit length of the bitstream on success; negative on error, + * CMP_ERROR_SMALL_BUF (-2) if the compressed data buffer is too small to + * hold the whole compressed data, CMP_ERROR_HIGH_VALUE (-3) if a data or + * model value is bigger than the max_used_bits parameter allows (set with + * the cmp_set_max_used_bits() function) + */ + +int icu_compress_data(const struct cmp_cfg *cfg) +{ + struct cmp_cfg cfg_cpy; + int dst_capacity_used = 0; + + if (cfg) { + if (cfg->samples == 0) + return 0; + cfg_cpy = *cfg; + cfg_cpy.buffer_length = cmp_cal_size_of_data(cfg->buffer_length, cfg->data_type); + if (!cfg_cpy.buffer_length) + return -1; + + if (!rdcu_supported_data_type_is_used(cfg->data_type) && !cmp_data_type_is_invalid(cfg->data_type)) { + if (cfg->icu_new_model_buf) { + if (cfg->input_buf) 
+ memcpy(cfg->icu_new_model_buf, cfg->input_buf, COLLECTION_HDR_SIZE); + cfg_cpy.icu_new_model_buf = (uint8_t *)cfg_cpy.icu_new_model_buf + COLLECTION_HDR_SIZE; + } + if (cfg->icu_output_buf && cfg->input_buf && cfg->buffer_length) + memcpy(cfg->icu_output_buf, cfg->input_buf, COLLECTION_HDR_SIZE); + if (cfg->input_buf) + cfg_cpy.input_buf = (uint8_t *)cfg->input_buf + COLLECTION_HDR_SIZE; + if (cfg->model_buf) + cfg_cpy.model_buf = (uint8_t *)cfg->model_buf + COLLECTION_HDR_SIZE; + dst_capacity_used = COLLECTION_HDR_SIZE*8; + } + return compress_data_internal(&cfg_cpy, dst_capacity_used); + } + return compress_data_internal(NULL, dst_capacity_used); +} + + +/* TODO: doc string */ + +static uint32_t cmp_guess_good_spill(uint32_t golomb_par) +{ + if (!golomb_par) + return 0; + return cmp_icu_max_spill(golomb_par); +} + + +/* TODO: doc string */ + +static int set_cmp_col_size(uint8_t *p, int cmp_col_size) +{ + uint16_t v = (uint16_t)cmp_col_size; + + if (cmp_col_size > UINT16_MAX) + return -1; + + v -= COLLECTION_HDR_SIZE+2; + if (p) { + memset(p, v >> 8, 1); + memset(p+1, v & 0xFF, 1); + } + + return 0; +} + + +/* TODO: doc string */ + +static int cmp_collection(uint8_t *col, uint8_t *model, uint8_t *updated_model, uint32_t *dst, + uint32_t dst_capacity, struct cmp_cfg *cfg, int dst_size) +{ + int dst_size_begin = dst_size; + int dst_size_bits; + + if (dst_size < 0) + return dst_size; + if (!col) + return -1; + + if (cfg->cmp_mode != CMP_MODE_RAW) { + /* we put the compressed data size be */ + dst_size += CMP_COLLECTION_FILD_SIZE; + } + + /* we do not compress the collection header, we simply copy the header + * into the compressed data + */ + if (dst) { + if ((uint32_t)dst_size + COLLECTION_HDR_SIZE > dst_capacity) + return CMP_ERROR_SMALL_BUF; + memcpy((uint8_t *)dst + dst_size, col, COLLECTION_HDR_SIZE); + if (model_mode_is_used(cfg->cmp_mode) && cfg->icu_new_model_buf) + memcpy(cfg->icu_new_model_buf, col, COLLECTION_HDR_SIZE); + } + dst_size += COLLECTION_HDR_SIZE; + + /* prepare the different buffers */ + cfg->icu_output_buf = dst; + cfg->input_buf = col + COLLECTION_HDR_SIZE; + if (model) + cfg->model_buf = model + COLLECTION_HDR_SIZE; + if (updated_model) + cfg->icu_new_model_buf = updated_model + COLLECTION_HDR_SIZE; + + { + struct collection_hdr *col_hdr = (struct collection_hdr *)col; + uint8_t subservice = cmp_col_get_subservice(col_hdr); + uint16_t col_data_length = cmp_col_get_data_length(col_hdr); + + + cfg->data_type = convert_subservice_to_cmp_data_type(subservice); + + if (!size_of_a_sample(cfg->data_type)) + return -1; + if (col_data_length % size_of_a_sample(cfg->data_type)) + return -1; + cfg->samples = col_data_length/size_of_a_sample(cfg->data_type); + if ((dst && (uint32_t)dst_size + col_data_length > dst_capacity) || + cfg->cmp_mode == CMP_MODE_RAW) { + cfg->buffer_length = dst_capacity; + dst_size_bits = compress_data_internal(cfg, dst_size<<3); + if (dst_size < 0) + return dst_size_bits; + } else { + /* we limit the size of the compressed data buffer */ + cfg->buffer_length = (uint32_t)dst_size + col_data_length-1; + dst_size_bits = compress_data_internal(cfg, dst_size<<3); + if (dst_size_bits == CMP_ERROR_SMALL_BUF || + (!dst && (int)cmp_bit_to_byte((unsigned int)dst_size_bits)-dst_size > col_data_length)) { /* if dst == NULL icu_compress_data_2 will not return a CMP_ERROR_SMALL_BUF */ + enum cmp_mode cmp_mode_cpy = cfg->cmp_mode; + + cfg->buffer_length = (uint32_t)dst_size + col_data_length; + cfg->cmp_mode = CMP_MODE_RAW; + dst_size_bits = 
compress_data_internal(cfg, dst_size<<3); + cfg->cmp_mode = cmp_mode_cpy; + } + } + if (dst_size_bits < 0) + return dst_size_bits; + } + + dst_size = (int)cmp_bit_to_byte((unsigned int)dst_size_bits); /*TODO: fix casts */ + if (dst && cfg->cmp_mode != CMP_MODE_RAW) + if (set_cmp_col_size((uint8_t *)dst+dst_size_begin, dst_size-dst_size_begin)) + return -1; + + return dst_size; +} + + +static int cmp_ent_build_chunk_header(struct cmp_entity *ent, uint32_t chunk_size, + const struct cmp_cfg *cfg, uint64_t start_timestamp, + int32_t cmp_ent_size_byte) +{ + if (ent) { /* setup the compressed entity header */ + int err = 0; + + if (cmp_ent_size_byte < 0) + cmp_ent_size_byte = 0; + err |= cmp_ent_set_version_id(ent, version_identifier); + err |= cmp_ent_set_size(ent, (uint32_t)cmp_ent_size_byte); + if (cmp_ent_set_original_size(ent, chunk_size)) { + debug_print("Error: The size of the chunk is too.\n"); + return -1; + } + err |= cmp_ent_set_start_timestamp(ent, start_timestamp); + err |= cmp_ent_set_data_type(ent, DATA_TYPE_CHUNK, cfg->cmp_mode == CMP_MODE_RAW); + err |= cmp_ent_set_cmp_mode(ent, cfg->cmp_mode); + err |= cmp_ent_set_model_value(ent, cfg->model_value); + err |= cmp_ent_set_model_id(ent, 0); + err |= cmp_ent_set_model_counter(ent, 0); + if (cfg->max_used_bits) + err |= cmp_ent_set_max_used_bits_version(ent, cfg->max_used_bits->version); + else + err |= cmp_ent_set_max_used_bits_version(ent, 0); + err |= cmp_ent_set_lossy_cmp_par(ent, cfg->round); + if (cfg->cmp_mode != CMP_MODE_RAW) { + err |= cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1); + err |= cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1); + err |= cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2); + err |= cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2); + err |= cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3); + err |= cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3); + err |= cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4); + err |= cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4); + err |= cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5); + err |= cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5); + err |= cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6); + err |= cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6); + } + err |= cmp_ent_set_end_timestamp(ent, get_timestamp()); + if (err) + return -1; + } + return NON_IMAGETTE_HEADER_SIZE; +} + + +/* TODO: doc string; ref document */ + +static enum chunk_type get_chunk_type(uint16_t subservice) +{ + enum chunk_type chunk_type = CHUNK_TYPE_UNKNOWN; + + switch (subservice) { + case SST_NCxx_S_SCIENCE_IMAGETTE: + chunk_type = CHUNK_TYPE_NCAM_IMGAETTE; + break; + case SST_NCxx_S_SCIENCE_SAT_IMAGETTE: + chunk_type = CHUNK_TYPE_SAT_IMGAETTE; + break; + case SST_NCxx_S_SCIENCE_OFFSET: + case SST_NCxx_S_SCIENCE_BACKGROUND: + chunk_type = CHUNK_TYPE_OFFSET_BACKGROUND; + break; + case SST_NCxx_S_SCIENCE_SMEARING: + chunk_type = CHUNK_TYPE_SMEARING; + break; + case SST_NCxx_S_SCIENCE_S_FX: + case SST_NCxx_S_SCIENCE_S_FX_EFX: + case SST_NCxx_S_SCIENCE_S_FX_NCOB: + case SST_NCxx_S_SCIENCE_S_FX_EFX_NCOB_ECOB: + chunk_type = CHUNK_TYPE_SHORT_CADENCE; + break; + case SST_NCxx_S_SCIENCE_L_FX: + case SST_NCxx_S_SCIENCE_L_FX_EFX: + case SST_NCxx_S_SCIENCE_L_FX_NCOB: + case SST_NCxx_S_SCIENCE_L_FX_EFX_NCOB_ECOB: + chunk_type = CHUNK_TYPE_SHORT_CADENCE; + break; + case SST_NCxx_S_SCIENCE_F_FX: + case SST_NCxx_S_SCIENCE_F_FX_EFX: + case SST_NCxx_S_SCIENCE_F_FX_NCOB: + case SST_NCxx_S_SCIENCE_F_FX_EFX_NCOB_ECOB: + debug_print("Error: No chunk is defined for fast cadence 
subservices\n"); + chunk_type = CHUNK_TYPE_UNKNOWN; + break; + default: + debug_print("Error: Unknown subservice: %i.\n", subservice); + chunk_type = CHUNK_TYPE_UNKNOWN; + break; + }; + + return chunk_type; +} + + +static enum chunk_type cmp_col_get_chunk_type(const struct collection_hdr *col) +{ + return get_chunk_type(cmp_col_get_subservice(col)); +} + + +static int read_in_cmp_par(const struct cmp_par *par, enum chunk_type chunk_type, + struct cmp_cfg *cfg) +{ + memset(cfg, 0, sizeof(struct cmp_cfg)); + + cfg->cmp_mode = par->cmp_mode; + cfg->model_value = par->model_value; + cfg->round = par->lossy_par; + cfg->max_used_bits = &MAX_USED_BITS_SAFE; + + switch (chunk_type) { + case CHUNK_TYPE_NCAM_IMGAETTE: + cfg->cmp_par_imagette = par->nc_imagette; + break; + case CHUNK_TYPE_SAT_IMGAETTE: + cfg->cmp_par_imagette = par->saturated_imagette; + break; + case CHUNK_TYPE_SHORT_CADENCE: + cfg->cmp_par_exp_flags = par->s_exp_flags; + cfg->cmp_par_fx = par->s_fx; + cfg->cmp_par_ncob = par->s_ncob; + cfg->cmp_par_efx = par->s_efx; + cfg->cmp_par_ecob = par->s_ecob; + break; + case CHUNK_TYPE_LONG_CADENCE: + cfg->cmp_par_exp_flags = par->l_exp_flags; + cfg->cmp_par_fx = par->l_fx; + cfg->cmp_par_ncob = par->l_ncob; + cfg->cmp_par_efx = par->l_efx; + cfg->cmp_par_ecob = par->l_ecob; + cfg->cmp_par_fx_cob_variance = par->l_fx_cob_variance; + break; + case CHUNK_TYPE_OFFSET_BACKGROUND: + cfg->cmp_par_offset_mean = par->nc_offset_mean; + cfg->cmp_par_offset_variance = par->nc_offset_variance; + + cfg->cmp_par_background_mean = par->nc_background_mean; + cfg->cmp_par_background_variance = par->nc_background_variance; + cfg->cmp_par_background_pixels_error = par->nc_background_outlier_pixels; + break; + + case CHUNK_TYPE_SMEARING: + cfg->cmp_par_smearing_mean = par->smearing_mean; + cfg->cmp_par_smearing_variance = par->smearing_variance_mean; + cfg->cmp_par_smearing_pixels_error = par->smearing_outlier_pixels; + break; + + case CHUNK_TYPE_F_CHAIN: + cfg->cmp_par_imagette = par->fc_imagette; + + cfg->cmp_par_offset_mean = par->fc_offset_mean; + cfg->cmp_par_offset_variance = par->fc_offset_variance; + + cfg->cmp_par_background_mean = par->fc_background_mean; + cfg->cmp_par_background_variance = par->fc_background_variance; + cfg->cmp_par_background_pixels_error = par->fc_background_outlier_pixels; + break; + case CHUNK_TYPE_UNKNOWN: + default: + return -1; + }; + + cfg->spill_par_1 = cmp_guess_good_spill(cfg->cmp_par_1); + cfg->spill_par_2 = cmp_guess_good_spill(cfg->cmp_par_2); + cfg->spill_par_3 = cmp_guess_good_spill(cfg->cmp_par_3); + cfg->spill_par_4 = cmp_guess_good_spill(cfg->cmp_par_4); + cfg->spill_par_5 = cmp_guess_good_spill(cfg->cmp_par_5); + cfg->spill_par_6 = cmp_guess_good_spill(cfg->cmp_par_6); + return 0; +} + + +/** + * @brief initialise the compress_chunk() function + * + * If not initialised the compress_chunk() function sets the timestamps and + * version_id in the compression entity header to zero + * + * @param return_timestamp pointer to a function returning a current 48-bit + * timestamp + * @param version_id version identifier of the applications software + */ + +void compress_chunk_init(uint64_t(return_timestamp)(void), uint32_t version_id) +{ + if (return_timestamp) + get_timestamp = return_timestamp; + + version_identifier = version_id; +} + + +/** + * @brief compress a data chunk consisting of put together data collections + * + * @param chunk pointer to the chunk to be compressed + * @param chunk_size byte size of the chunk + * @param chunk_model pointer to a model of a 
chunk; has the same size + * as the chunk (can be NULL if no model compression + * mode is used) + * @param updated_chunk_model pointer to store the updated model for the next + * model mode compression; has the same size as the + * chunk (can be the same as the model_of_data + * buffer for in-place update or NULL if updated + * model is not needed) + * @param dst destination pointer to the compressed data + * buffer; has to be 4-byte aligned (can be NULL) + * @param dst_capacity capacity of the dst buffer; it's recommended to + * provide a dst_capacity >= + * compress_chunk_cmp_size_bound(chunk, chunk_size) + * as it eliminates one potential failure scenario: + * not enough space in the dst buffer to write the + * compressed data; size is round down to a multiple + * of 4 + * @returns the byte size of the compressed_data buffer on success; negative on + * error, CMP_ERROR_SMALL_BUF (-2) if the compressed data buffer is too + * small to hold the whole compressed data + */ + +int compress_chunk(uint32_t *chunk, uint32_t chunk_size, uint32_t *chunk_model, + uint32_t *updated_chunk_model, uint32_t *dst, uint32_t dst_capacity, + const struct cmp_par *cmp_par) +{ + uint64_t start_timestamp = get_timestamp(); + size_t read_bytes; + struct collection_hdr *col = (struct collection_hdr *)chunk; + int cmp_size_byte; /* size of the compressed data in bytes */ + enum chunk_type chunk_type; + struct cmp_cfg cfg; + int err; + + if (!chunk) { + debug_print("Error: Pointer to the data chunk is NULL. No data no compression.\n"); + return -1; + } + if (chunk_size < COLLECTION_HDR_SIZE) { + debug_print("Error: The chunk size is smaller than the minimum size.\n"); + return -1; + } + if (chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE) { + debug_print("Error: The chunk size is bigger than the maximum allowed chunk size.\n"); + return -1; + } + + /* we will build the compression header after the compression of the chunk */ + if (cmp_par->cmp_mode == CMP_MODE_RAW) + cmp_size_byte = GENERIC_HEADER_SIZE; + else + cmp_size_byte = NON_IMAGETTE_HEADER_SIZE; + if (dst) { + if (dst_capacity < (size_t)cmp_size_byte) { + debug_print("Error: The destination capacity is smaller than the minimum compression entity size.\n"); + return CMP_ERROR_SMALL_BUF; + } + memset(dst, 0, (size_t)cmp_size_byte); + } + + chunk_type = cmp_col_get_chunk_type(col); + if (read_in_cmp_par(cmp_par, chunk_type, &cfg)) + return -1; + + for (read_bytes = 0; + read_bytes < chunk_size - COLLECTION_HDR_SIZE; + read_bytes += cmp_col_get_size(col)) { + /* setup pointers for the next collection we want to compress */ + col = (struct collection_hdr *)((uint8_t *)chunk + read_bytes); /* TODO: ARE ALL COLLECTION 4 BYTE ALLIED? 
*/ + if (chunk_model) + chunk_model = (uint8_t *)chunk_model + read_bytes; /* TODO: ARE ALL COLLECTION 4 BYTE ALLIED?*/ + if (updated_chunk_model) + updated_chunk_model = (uint8_t *)updated_chunk_model + read_bytes; /* TODO: ARE ALL COLLECTION 4 BYTE ALLIED?*/ + + if (cmp_col_get_chunk_type(col) != chunk_type) { + debug_print("Error: The chunk contains collections with an incompatible mix of subservices.\n"); + return -1; + } + + cmp_size_byte = cmp_collection((uint8_t *)col, + (uint8_t *)chunk_model, + (uint8_t *)updated_chunk_model, + dst, dst_capacity, &cfg, cmp_size_byte); + } + + if (read_bytes != chunk_size) { + debug_print("Error: The sum of the compressed collections does not match the size of the data in the compression header.\n"); + return -1; + } + + err = cmp_ent_build_chunk_header((struct cmp_entity *)dst, chunk_size, &cfg, + start_timestamp, cmp_size_byte); + if (err < 0) + return err; + + return cmp_size_byte; +} + + +/** + * @brief returns the maximum compressed size in a worst case scenario + * + * In case the input data is not compressible + * This function is primarily useful for memory allocation purposes + * (destination buffer size). + * + * @note if the number of collections is known you can use the + * COMPRESS_CHUNK_BOUND macro for compilation-time evaluation + * (stack memory allocation for example) + * + * @param chunk pointer to the chunk you want compress + * @param chunk_size size of the chunk in bytes + * + * @returns maximum compressed size for a chunk compression; 0 on error + */ + +uint32_t compress_chunk_cmp_size_bound(void *chunk, uint32_t chunk_size) +{ + int32_t read_bytes; + uint32_t num_col = 0; + + if (chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE-NON_IMAGETTE_HEADER_SIZE-CMP_COLLECTION_FILD_SIZE) { + debug_print("Error: The chunk size is bigger than the maximum allowed chunk size.\n"); + return 0; + } + + for (read_bytes = 0; + read_bytes < (int32_t)chunk_size-COLLECTION_HDR_SIZE; + read_bytes += cmp_col_get_size((struct collection_hdr *)((uint8_t *)chunk + read_bytes))) + num_col++; + + + if ((uint32_t)read_bytes != chunk_size) { + debug_print("Error: The sum of the compressed collections does not match the size of the data in the compression header.\n"); + return 0; + } + + return COMPRESS_CHUNK_BOUND(chunk_size, num_col); +} + + +/** + * @brief set the model id and model counter in the compression entity header + * + * @param dst pointer to the compressed data starting with a + * compression entity header + * @param dst_size byte size of the dst buffer + * @param model_id model identifier; for identifying entities that originate + * from the same starting model + * @param model_counter model_counter; counts how many times the model was + * updated; for non model mode compression use 0 + * + * @returns 0 on success, otherwise error + */ + +int compress_chunk_set_model_id_and_counter(uint32_t *dst, int dst_size, + uint16_t model_id, uint8_t model_counter) +{ + if (dst_size < NON_IMAGETTE_HEADER_SIZE) + return 1; + + return cmp_ent_set_model_id((struct cmp_entity *)dst, model_id) || + cmp_ent_set_model_counter((struct cmp_entity *)dst, model_counter); + +} diff --git a/programs/cmp_io.c b/programs/cmp_io.c index 4fadaa6a4de9f44acb614cd2b110d6ac9f9b529a..7db309d195154e93ea653b51c6db1fa2fe71ea98 100644 --- a/programs/cmp_io.c +++ b/programs/cmp_io.c @@ -676,6 +676,7 @@ static int parse_cfg(FILE *fp, struct cmp_cfg *cfg) return -1; continue; } +#if 0 if (!strcmp(token1, "cmp_par_mean")) { if (atoui32(token1, token2, &cfg->cmp_par_mean)) return -1; @@ 
-706,6 +707,7 @@ static int parse_cfg(FILE *fp, struct cmp_cfg *cfg) return -1; continue; } +#endif if (!strcmp(token1, "rdcu_data_adr")) { int i = sram_addr_to_int(token2); @@ -1662,7 +1664,7 @@ static void write_cfg_internal(FILE *fp, const struct cmp_cfg *cfg) fprintf(fp, "\n"); fprintf(fp, "#-------------------------------------------------------------------------------\n"); } - +#if 0 if (cmp_aux_data_type_is_used(cfg->data_type)) { fprintf(fp, "# mean compression parameter\n"); fprintf(fp, "\n"); @@ -1698,6 +1700,7 @@ static void write_cfg_internal(FILE *fp, const struct cmp_cfg *cfg) fprintf(fp, "#-------------------------------------------------------------------------------\n"); } } +#endif if (cmp_fx_cob_data_type_is_used(cfg->data_type)) { struct fx_cob_par needed_pars; diff --git a/test/cmp_data_types/test_cmp_data_types.c b/test/cmp_data_types/test_cmp_data_types.c index 2d748032445c5f8aea58b09ada285b3b4e220d4f..a09c50c568a3787ddb92a8a03f140e4bd255dcda 100644 --- a/test/cmp_data_types/test_cmp_data_types.c +++ b/test/cmp_data_types/test_cmp_data_types.c @@ -58,7 +58,7 @@ void test_cmp_col_get_and_set(void) err = cmp_col_set_timestamp(col, 0x000102030405); TEST_ASSERT_FALSE(err); timestamp = cmp_col_get_timestamp(col); - TEST_ASSERT_EQUAL_HEX64(0x000102030405, timestamp); + TEST_ASSERT_EQUAL(0x000102030405, timestamp); /* error cases */ err = cmp_col_set_timestamp(NULL, 0x000102030405); TEST_ASSERT_TRUE(err); @@ -85,6 +85,7 @@ void test_cmp_col_get_and_set(void) err = cmp_col_set_pkt_type(NULL, 1); TEST_ASSERT_TRUE(err); err = cmp_col_set_pkt_type(col, 0); + TEST_ASSERT_FALSE(err); err = cmp_col_set_subservice(col, 0x3F); TEST_ASSERT_FALSE(err); @@ -97,6 +98,7 @@ void test_cmp_col_get_and_set(void) err = cmp_col_set_subservice(NULL, 0x3F); TEST_ASSERT_TRUE(err); err = cmp_col_set_subservice(col, 0); + TEST_ASSERT_FALSE(err); err = cmp_col_set_ccd_id(col, 0x3); TEST_ASSERT_FALSE(err); @@ -109,6 +111,7 @@ void test_cmp_col_get_and_set(void) err = cmp_col_set_ccd_id(NULL, 0x3); TEST_ASSERT_TRUE(err); err = cmp_col_set_ccd_id(col, 0); + TEST_ASSERT_FALSE(err); err = cmp_col_set_sequence_num(col, 0x7F); TEST_ASSERT_FALSE(err); @@ -130,12 +133,12 @@ void test_cmp_col_get_and_set(void) err = cmp_col_set_col_id(NULL, 0x0809); TEST_ASSERT_TRUE(err); - err = cmp_col_set_length(col, 0x0A0B); + err = cmp_col_set_data_length(col, 0x0A0B); TEST_ASSERT_FALSE(err); - collection_length = cmp_col_get_length(col); + collection_length = cmp_col_get_data_length(col); TEST_ASSERT_EQUAL_HEX16(0x0A0B, collection_length); /* error cases */ - err = cmp_col_set_length(NULL, 0x0A0B); + err = cmp_col_set_data_length(NULL, 0x0A0B); TEST_ASSERT_TRUE(err); for (i = 0; i < sizeof(struct collection_hdr); i++) { diff --git a/test/cmp_decmp/test_cmp_decmp.c b/test/cmp_decmp/test_cmp_decmp.c index 1ed42df084d75b86778e20958926c360df8aada5..1331f3de4fb4c775da781436081ffbffa51ca53c 100644 --- a/test/cmp_decmp/test_cmp_decmp.c +++ b/test/cmp_decmp/test_cmp_decmp.c @@ -28,6 +28,7 @@ #include "../test_common/test_common.h" #include <cmp_icu.h> +#include <cmp_chunk.h> #include <decmp.h> #include <cmp_data_types.h> #include <leon_inttypes.h> @@ -43,6 +44,7 @@ #define IMAX_BITS(m) ((m)/((m)%255+1) / 255%255*8 + 7-86/((m)%255+12)) #define RAND_MAX_WIDTH IMAX_BITS(RAND_MAX) +#define ROUND_UP_TO_MULTIPLE_OF_4(x) (((x) + 3) & ~3) /** * @brief Seeds the pseudo-random number generator used by rand() @@ -67,246 +69,400 @@ void setUp(void) } -static void gen_ima_data(uint16_t *data, uint32_t samples, const struct cmp_max_used_bits 
*max_used_bits) +static size_t gen_ima_data(uint16_t *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) - data[i] = (uint16_t)cmp_rand_nbits(max_used_bits->nc_imagette); + if (data) + for (i = 0; i < samples; i++) + data[i] = (uint16_t)cmp_rand_nbits(max_used_bits->nc_imagette); + return sizeof(*data) * samples; } -static void gen_nc_offset_data(struct offset *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_nc_offset_data(struct offset *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].mean = cmp_rand_nbits(max_used_bits->nc_offset_mean); - data[i].variance = cmp_rand_nbits(max_used_bits->nc_offset_variance); + if (data) { + for (i = 0; i < samples; i++) { + data[i].mean = cmp_rand_nbits(max_used_bits->nc_offset_mean); + data[i].variance = cmp_rand_nbits(max_used_bits->nc_offset_variance); + } } + return sizeof(*data) * samples; } -static void gen_fc_offset_data(struct offset *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_fc_offset_data(struct offset *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].mean = cmp_rand_nbits(max_used_bits->fc_offset_mean); - data[i].variance = cmp_rand_nbits(max_used_bits->fc_offset_variance); + if (data) { + for (i = 0; i < samples; i++) { + data[i].mean = cmp_rand_nbits(max_used_bits->fc_offset_mean); + data[i].variance = cmp_rand_nbits(max_used_bits->fc_offset_variance); + } } + return sizeof(*data) * samples; } -static void gen_nc_background_data(struct background *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_nc_background_data(struct background *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].mean = cmp_rand_nbits(max_used_bits->nc_background_mean); - data[i].variance = cmp_rand_nbits(max_used_bits->nc_background_variance); - data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->nc_background_outlier_pixels); + if (data) { + for (i = 0; i < samples; i++) { + data[i].mean = cmp_rand_nbits(max_used_bits->nc_background_mean); + data[i].variance = cmp_rand_nbits(max_used_bits->nc_background_variance); + data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->nc_background_outlier_pixels); + } } + return sizeof(*data) * samples; } -static void gen_fc_background_data(struct background *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_fc_background_data(struct background *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].mean = cmp_rand_nbits(max_used_bits->fc_background_mean); - data[i].variance = cmp_rand_nbits(max_used_bits->fc_background_variance); - data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->fc_background_outlier_pixels); + if (data) { + for (i = 0; i < samples; i++) { + data[i].mean = cmp_rand_nbits(max_used_bits->fc_background_mean); + data[i].variance = cmp_rand_nbits(max_used_bits->fc_background_variance); + data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->fc_background_outlier_pixels); + } } + return sizeof(*data) * samples; } 
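A brief aside (not part of the patch) on the pattern this refactoring enables: every generator now returns the byte size of the generated data and accepts data == NULL, so a caller can query the required size before allocating. The helper below is a hedged sketch of that size-query-then-fill usage; gen_nc_offset_data stands in for any of the generators and the function name is hypothetical.

/* Illustrative only, not part of the patch: size the buffer with a NULL call,
 * then fill it with a second call. */
static struct offset *sketch_alloc_random_offset_data(uint32_t samples)
{
	size_t size = gen_nc_offset_data(NULL, samples, &MAX_USED_BITS_SAFE);
	struct offset *data = malloc(size);

	if (data)
		gen_nc_offset_data(data, samples, &MAX_USED_BITS_SAFE);
	return data;
}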
-static void gen_smearing_data(struct smearing *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_smearing_data(struct smearing *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].mean = cmp_rand_nbits(max_used_bits->smearing_mean); - data[i].variance_mean = (__typeof__(data[i].variance_mean))cmp_rand_nbits(max_used_bits->smearing_variance_mean); - data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->smearing_outlier_pixels); + if (data) { + for (i = 0; i < samples; i++) { + data[i].mean = cmp_rand_nbits(max_used_bits->smearing_mean); + data[i].variance_mean = (__typeof__(data[i].variance_mean))cmp_rand_nbits(max_used_bits->smearing_variance_mean); + data[i].outlier_pixels = (__typeof__(data[i].outlier_pixels))cmp_rand_nbits(max_used_bits->smearing_outlier_pixels); + } } + return sizeof(*data) * samples; } -static void gen_s_fx_data(struct s_fx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_s_fx_data(struct s_fx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); + } } + return sizeof(*data) * samples; } -static void gen_s_fx_efx_data(struct s_fx_efx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_s_fx_efx_data(struct s_fx_efx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); - data[i].efx = cmp_rand_nbits(max_used_bits->s_efx); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); + data[i].efx = cmp_rand_nbits(max_used_bits->s_efx); + } } + return sizeof(*data) * samples; } -static void gen_s_fx_ncob_data(struct s_fx_ncob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_s_fx_ncob_data(struct s_fx_ncob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->s_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->s_ncob); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->s_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->s_ncob); + } } + return sizeof(*data) * samples; } -static void gen_s_fx_efx_ncob_ecob_data(struct s_fx_efx_ncob_ecob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_s_fx_efx_ncob_ecob_data(struct 
s_fx_efx_ncob_ecob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->s_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->s_ncob); - data[i].efx = cmp_rand_nbits(max_used_bits->s_efx); - data[i].ecob_x = cmp_rand_nbits(max_used_bits->s_ecob); - data[i].ecob_y = cmp_rand_nbits(max_used_bits->s_ecob); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = (__typeof__(data[i].exp_flags))cmp_rand_nbits(max_used_bits->s_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->s_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->s_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->s_ncob); + data[i].efx = cmp_rand_nbits(max_used_bits->s_efx); + data[i].ecob_x = cmp_rand_nbits(max_used_bits->s_ecob); + data[i].ecob_y = cmp_rand_nbits(max_used_bits->s_ecob); + } } + return sizeof(*data) * samples; } -static void gen_f_fx_data(struct f_fx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_f_fx_data(struct f_fx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) - data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); + if (data) + for (i = 0; i < samples; i++) + data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); + return sizeof(*data) * samples; } -static void gen_f_fx_efx_data(struct f_fx_efx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_f_fx_efx_data(struct f_fx_efx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); - data[i].efx = cmp_rand_nbits(max_used_bits->f_efx); + if (data) { + for (i = 0; i < samples; i++) { + data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); + data[i].efx = cmp_rand_nbits(max_used_bits->f_efx); + } } + return sizeof(*data) * samples; } -static void gen_f_fx_ncob_data(struct f_fx_ncob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_f_fx_ncob_data(struct f_fx_ncob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->f_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->f_ncob); + if (data) { + for (i = 0; i < samples; i++) { + data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->f_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->f_ncob); + } } + return sizeof(*data) * samples; } -static void gen_f_fx_efx_ncob_ecob_data(struct f_fx_efx_ncob_ecob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_f_fx_efx_ncob_ecob_data(struct f_fx_efx_ncob_ecob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->f_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->f_ncob); - data[i].efx = cmp_rand_nbits(max_used_bits->f_efx); - data[i].ecob_x = cmp_rand_nbits(max_used_bits->f_ecob); - data[i].ecob_y = cmp_rand_nbits(max_used_bits->f_ecob); + if 
(data) { + for (i = 0; i < samples; i++) { + data[i].fx = cmp_rand_nbits(max_used_bits->f_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->f_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->f_ncob); + data[i].efx = cmp_rand_nbits(max_used_bits->f_efx); + data[i].ecob_x = cmp_rand_nbits(max_used_bits->f_ecob); + data[i].ecob_y = cmp_rand_nbits(max_used_bits->f_ecob); + } } + return sizeof(*data) * samples; } -static void gen_l_fx_data(struct l_fx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_l_fx_data(struct l_fx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); - data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); + data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + } } + return sizeof(*data) * samples; } -static void gen_l_fx_efx_data(struct l_fx_efx *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_l_fx_efx_data(struct l_fx_efx *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { uint32_t i; - for (i = 0; i < samples; i++) { - data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); - data[i].efx = cmp_rand_nbits(max_used_bits->l_efx); - data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + if (data) { + for (i = 0; i < samples; i++) { + data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); + data[i].efx = cmp_rand_nbits(max_used_bits->l_efx); + data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + } } + return sizeof(*data) * samples; } -static void gen_l_fx_ncob_data(struct l_fx_ncob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static size_t gen_l_fx_ncob_data(struct l_fx_ncob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) { - uint32_t i; + if (data) { + uint32_t i; + + for (i = 0; i < samples; i++) { + data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->l_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->l_ncob); + data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + data[i].cob_x_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + data[i].cob_y_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + } + } + return sizeof(*data) * samples; +} - for (i = 0; i < samples; i++) { - data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->l_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->l_ncob); - data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); - data[i].cob_x_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); - data[i].cob_y_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + +static size_t gen_l_fx_efx_ncob_ecob_data(struct l_fx_efx_ncob_ecob *data, uint32_t samples, + const struct cmp_max_used_bits *max_used_bits) +{ + if (data) { + uint32_t i; + + for (i = 0; i < samples; i++) { 
+ data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); + data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); + data[i].ncob_x = cmp_rand_nbits(max_used_bits->l_ncob); + data[i].ncob_y = cmp_rand_nbits(max_used_bits->l_ncob); + data[i].efx = cmp_rand_nbits(max_used_bits->l_efx); + data[i].ecob_x = cmp_rand_nbits(max_used_bits->l_ecob); + data[i].ecob_y = cmp_rand_nbits(max_used_bits->l_ecob); + data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); + data[i].cob_x_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + data[i].cob_y_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + } } + return sizeof(*data) * samples; } -static void gen_l_fx_efx_ncob_ecob_data(struct l_fx_efx_ncob_ecob *data, uint32_t samples, - const struct cmp_max_used_bits *max_used_bits) +static uint8_t get_sst(enum cmp_data_type data_type) { - uint32_t i; + uint8_t sst = 0; - for (i = 0; i < samples; i++) { - data[i].exp_flags = cmp_rand_nbits(max_used_bits->l_exp_flags); - data[i].fx = cmp_rand_nbits(max_used_bits->l_fx); - data[i].ncob_x = cmp_rand_nbits(max_used_bits->l_ncob); - data[i].ncob_y = cmp_rand_nbits(max_used_bits->l_ncob); - data[i].efx = cmp_rand_nbits(max_used_bits->l_efx); - data[i].ecob_x = cmp_rand_nbits(max_used_bits->l_ecob); - data[i].ecob_y = cmp_rand_nbits(max_used_bits->l_ecob); - data[i].fx_variance = cmp_rand_nbits(max_used_bits->l_fx_variance); - data[i].cob_x_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); - data[i].cob_y_variance = cmp_rand_nbits(max_used_bits->l_cob_variance); + switch (data_type) { + case DATA_TYPE_IMAGETTE: + case DATA_TYPE_IMAGETTE_ADAPTIVE: + sst = SST_NCxx_S_SCIENCE_IMAGETTE; + break; + case DATA_TYPE_SAT_IMAGETTE: + case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE: + sst = SST_NCxx_S_SCIENCE_SAT_IMAGETTE; + break; + case DATA_TYPE_OFFSET: + sst = SST_NCxx_S_SCIENCE_OFFSET; + break; + case DATA_TYPE_BACKGROUND: + sst = SST_NCxx_S_SCIENCE_BACKGROUND; + break; + case DATA_TYPE_SMEARING: + sst = SST_NCxx_S_SCIENCE_SMEARING; + break; + case DATA_TYPE_S_FX: + sst = SST_NCxx_S_SCIENCE_S_FX; + break; + case DATA_TYPE_S_FX_EFX: + sst = SST_NCxx_S_SCIENCE_S_FX_EFX; + break; + case DATA_TYPE_S_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_S_FX_NCOB; + break; + case DATA_TYPE_S_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_S_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_L_FX: + sst = SST_NCxx_S_SCIENCE_L_FX; + break; + case DATA_TYPE_L_FX_EFX: + sst = SST_NCxx_S_SCIENCE_L_FX_EFX; + break; + case DATA_TYPE_L_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_L_FX_NCOB; + break; + case DATA_TYPE_L_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_L_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_F_FX: + sst = SST_NCxx_S_SCIENCE_F_FX; + break; + case DATA_TYPE_F_FX_EFX: + sst = SST_NCxx_S_SCIENCE_F_FX_EFX; + break; + case DATA_TYPE_F_FX_NCOB: + sst = SST_NCxx_S_SCIENCE_F_FX_NCOB; + break; + case DATA_TYPE_F_FX_EFX_NCOB_ECOB: + sst = SST_NCxx_S_SCIENCE_F_FX_EFX_NCOB_ECOB; + break; + case DATA_TYPE_F_CAM_IMAGETTE: + case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE: + sst = SST_FCx_S_SCIENCE_IMAGETTE; + break; + case DATA_TYPE_F_CAM_OFFSET: + sst = SST_FCx_S_SCIENCE_OFFSET_VALUES; + break; + case DATA_TYPE_F_CAM_BACKGROUND: + sst = SST_FCx_S_BACKGROUND_VALUES; + break; + default: + case DATA_TYPE_UNKNOWN: + TEST_FAIL(); + /* debug_print("Error: Unknown compression data type!\n"); */ + }; + + return sst; +} + + +size_t generate_random_collection_hdr(struct collection_hdr *col, enum cmp_data_type data_type, + uint32_t samples) +{ + static uint8_t sequence_num; + size_t data_size = 
size_of_a_sample(data_type)*samples; + + TEST_ASSERT(data_size <= UINT16_MAX); + + if (col) { + TEST_ASSERT_FALSE(cmp_col_set_timestamp(col, cmp_ent_create_timestamp(NULL))); + TEST_ASSERT_FALSE(cmp_col_set_configuration_id(col, (uint16_t)cmp_rand32())); + + TEST_ASSERT_FALSE(cmp_col_set_pkt_type(col, COL_SCI_PKTS_TYPE)); + TEST_ASSERT_FALSE(cmp_col_set_subservice(col, get_sst(data_type))); + TEST_ASSERT_FALSE(cmp_col_set_ccd_id(col, (uint8_t)cmp_rand_between(0, 3))); + TEST_ASSERT_FALSE(cmp_col_set_sequence_num(col, sequence_num++)); + + TEST_ASSERT_FALSE(cmp_col_set_data_length(col, (uint16_t)data_size)); } + return COLLECTION_HDR_SIZE; } @@ -320,26 +476,31 @@ static void gen_l_fx_efx_ncob_ecob_data(struct l_fx_efx_ncob_ecob *data, uint32_ * @returns a pointer to the generated random test data */ -void *generate_random_test_data(uint32_t samples, enum cmp_data_type data_type, - const struct cmp_max_used_bits *max_used_bits) +size_t generate_random_collection(struct collection_hdr *col, enum cmp_data_type data_type, + uint32_t samples, const struct cmp_max_used_bits *max_used_bits) { - size_t data_size = cmp_cal_size_of_data(samples, data_type); - void *data = malloc(data_size); - void *data_cpy = data; - uint8_t *p = data; + size_t size; + void *science_data = NULL; - TEST_ASSERT_NOT_EQUAL_INT(data_size, 0); - TEST_ASSERT(data_size < (CMP_ENTITY_MAX_SIZE - NON_IMAGETTE_HEADER_SIZE)); - TEST_ASSERT_NOT_NULL(data); + if (col) + science_data = col->entry; - if (!rdcu_supported_data_type_is_used(data_type)) { - int i; + if (rdcu_supported_data_type_is_used(data_type)) { + /* for the rdcu the header counts as data */ + size_t hdr_in_samples = COLLECTION_HDR_SIZE/size_of_a_sample(data_type); + TEST_ASSERT(samples >= hdr_in_samples); + samples -= hdr_in_samples; + } - TEST_ASSERT(data_size > COLLECTION_HDR_SIZE); - for (i = 0; i < COLLECTION_HDR_SIZE; ++i) - *p++ = (uint8_t)cmp_rand32(); - data = p; + size = generate_random_collection_hdr(col, data_type, samples); + /* TDOO remove me */ + int i; + for (i = 0; i < size_of_a_sample(data_type)*samples; ++i) { + if (col){ + col->entry[i] = i; + } } + return size+i; switch (data_type) { case DATA_TYPE_IMAGETTE: @@ -348,64 +509,87 @@ void *generate_random_test_data(uint32_t samples, enum cmp_data_type data_type, case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE: case DATA_TYPE_F_CAM_IMAGETTE: case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE: - gen_ima_data(data, samples, max_used_bits); + size += gen_ima_data(science_data, samples, max_used_bits); break; case DATA_TYPE_OFFSET: - gen_nc_offset_data(data, samples, max_used_bits); + size += gen_nc_offset_data(science_data, samples, max_used_bits); break; case DATA_TYPE_BACKGROUND: - gen_nc_background_data(data, samples, max_used_bits); + size += gen_nc_background_data(science_data, samples, max_used_bits); break; case DATA_TYPE_SMEARING: - gen_smearing_data(data, samples, max_used_bits); + size += gen_smearing_data(science_data, samples, max_used_bits); break; case DATA_TYPE_S_FX: - gen_s_fx_data(data, samples, max_used_bits); + size += gen_s_fx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_S_FX_EFX: - gen_s_fx_efx_data(data, samples, max_used_bits); + size += gen_s_fx_efx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_S_FX_NCOB: - gen_s_fx_ncob_data(data, samples, max_used_bits); + size += gen_s_fx_ncob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_S_FX_EFX_NCOB_ECOB: - gen_s_fx_efx_ncob_ecob_data(data, samples, max_used_bits); + size += 
gen_s_fx_efx_ncob_ecob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_L_FX: - gen_l_fx_data(data, samples, max_used_bits); + size += gen_l_fx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_L_FX_EFX: - gen_l_fx_efx_data(data, samples, max_used_bits); + size += gen_l_fx_efx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_L_FX_NCOB: - gen_l_fx_ncob_data(data, samples, max_used_bits); + size += gen_l_fx_ncob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_L_FX_EFX_NCOB_ECOB: - gen_l_fx_efx_ncob_ecob_data(data, samples, max_used_bits); + size += gen_l_fx_efx_ncob_ecob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_FX: - gen_f_fx_data(data, samples, max_used_bits); + size += gen_f_fx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_FX_EFX: - gen_f_fx_efx_data(data, samples, max_used_bits); + size += gen_f_fx_efx_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_FX_NCOB: - gen_f_fx_ncob_data(data, samples, max_used_bits); + size += gen_f_fx_ncob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_FX_EFX_NCOB_ECOB: - gen_f_fx_efx_ncob_ecob_data(data, samples, max_used_bits); + size += gen_f_fx_efx_ncob_ecob_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_CAM_OFFSET: - gen_fc_offset_data(data, samples, max_used_bits); + size += gen_fc_offset_data(science_data, samples, max_used_bits); break; case DATA_TYPE_F_CAM_BACKGROUND: - gen_fc_background_data(data, samples, max_used_bits); + size += gen_fc_background_data(science_data, samples, max_used_bits); break; default: TEST_FAIL(); } - return data_cpy; + return size; +} + +struct chunk_def { + enum cmp_data_type data_type; + uint32_t samples; +}; + + +static size_t generate_random_chunk(void *chunk, struct chunk_def col_array[], size_t array_elements, + const struct cmp_max_used_bits *max_used_bits) +{ + size_t i; + size_t chunk_size = 0; + struct collection_hdr *col = NULL; + + for (i = 0; i < array_elements; i++) { + if (chunk) + col = (struct collection_hdr *)((uint8_t *)chunk + chunk_size); + + chunk_size += generate_random_collection(col, col_array[i].data_type, + col_array[i].samples, max_used_bits); + } + return chunk_size; } @@ -417,34 +601,28 @@ void *generate_random_test_data(uint32_t samples, enum cmp_data_type data_type, void generate_random_cmp_par(struct cmp_cfg *cfg) { - cfg->golomb_par = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); - cfg->ap1_golomb_par = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); - cfg->ap2_golomb_par = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); - - cfg->cmp_par_exp_flags = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_fx = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_ncob = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_efx = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_ecob = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_fx_cob_variance = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_mean = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_variance = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - cfg->cmp_par_pixels_error = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); - - - cfg->spill = cmp_rand_between(MIN_IMA_SPILL, 
cmp_ima_max_spill(cfg->golomb_par)); - cfg->ap1_spill = cmp_rand_between(MIN_IMA_SPILL, cmp_ima_max_spill(cfg->ap1_golomb_par)); - cfg->ap2_spill = cmp_rand_between(MIN_IMA_SPILL, cmp_ima_max_spill(cfg->ap2_golomb_par)); - - cfg->spill_exp_flags = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_exp_flags)); - cfg->spill_fx = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_fx)); - cfg->spill_ncob = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_ncob)); - cfg->spill_efx = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_efx)); - cfg->spill_ecob = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_ecob)); - cfg->spill_fx_cob_variance = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_fx_cob_variance)); - cfg->spill_mean = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_mean)); - cfg->spill_variance = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_variance)); - cfg->spill_pixels_error = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_pixels_error)); + if (cmp_imagette_data_type_is_used(cfg->data_type)) { + cfg->cmp_par_imagette = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); + cfg->ap1_golomb_par = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); + cfg->ap2_golomb_par = cmp_rand_between(MIN_IMA_GOLOMB_PAR, MAX_IMA_GOLOMB_PAR); + cfg->spill_imagette = cmp_rand_between(MIN_IMA_SPILL, cmp_ima_max_spill(cfg->golomb_par)); + cfg->ap1_spill = cmp_rand_between(MIN_IMA_SPILL, cmp_ima_max_spill(cfg->ap1_golomb_par)); + cfg->ap2_spill = cmp_rand_between(MIN_IMA_SPILL, cmp_ima_max_spill(cfg->ap2_golomb_par)); + } else { + cfg->cmp_par_1 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->cmp_par_2 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->cmp_par_3 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->cmp_par_4 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->cmp_par_5 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->cmp_par_6 = cmp_rand_between(MIN_NON_IMA_GOLOMB_PAR, MAX_NON_IMA_GOLOMB_PAR); + cfg->spill_par_1 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_exp_flags)); + cfg->spill_par_2 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_fx)); + cfg->spill_par_3 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_ncob)); + cfg->spill_par_4 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_efx)); + cfg->spill_par_5 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_ecob)); + cfg->spill_par_6 = cmp_rand_between(MIN_NON_IMA_SPILL, cmp_icu_max_spill(cfg->cmp_par_fx_cob_variance)); + } + #if 0 if (cfg->cmp_mode == CMP_MODE_STUFF) { /* cfg->golomb_par = cmp_rand_between(16, MAX_STUFF_CMP_PAR); */ @@ -575,21 +753,37 @@ void compression_decompression(struct cmp_cfg *cfg) * @test decompress_cmp_entiy */ +#define MB *(1U<<20) +#define MAX_DATA_TO_COMPRESS_SIZE 0x1000B void test_random_compression_decompression(void) { enum cmp_data_type data_type; enum cmp_mode cmp_mode; struct cmp_cfg cfg; uint32_t cmp_buffer_size; + void *data_to_compress1 = malloc(MAX_DATA_TO_COMPRESS_SIZE); + void *data_to_compress2 = malloc(MAX_DATA_TO_COMPRESS_SIZE); + void *updated_model = calloc(1, MAX_DATA_TO_COMPRESS_SIZE); for (data_type = 1; data_type <= DATA_TYPE_F_CAM_BACKGROUND; data_type++) { /* printf("%s\n", 
data_type2string(data_type)); */ /* generate random data*/ - uint32_t samples = cmp_rand_between(1, 430179/CMP_BUFFER_FAKTOR); + /* uint32_t samples = cmp_rand_between(1, 430179/CMP_BUFFER_FAKTOR); */ + size_t size; + uint32_t samples = cmp_rand_between(1, UINT16_MAX/size_of_a_sample(data_type)); uint32_t model_value = cmp_rand_between(0, MAX_MODEL_VALUE); - void *data_to_compress1 = generate_random_test_data(samples, data_type, &MAX_USED_BITS_V1); - void *data_to_compress2 = generate_random_test_data(samples, data_type, &MAX_USED_BITS_V1); - void *updated_model = calloc(1, cmp_cal_size_of_data(samples, data_type)); + /* void *data_to_compress1 = generate_random_test_data(samples, data_type, &MAX_USED_BITS_V1); */ + /* void *data_to_compress2 = generate_random_test_data(samples, data_type, &MAX_USED_BITS_V1); */ + /* void *updated_model = calloc(1, cmp_cal_size_of_data(samples, data_type)); */ + /* memset(updated_model, 0, MAX_DATA_TO_COMPRESS_SIZE); */ + + size = generate_random_collection(NULL, data_type, samples, &MAX_USED_BITS_V1); + TEST_ASSERT(size <= MAX_DATA_TO_COMPRESS_SIZE); + size = generate_random_collection(data_to_compress1, data_type, samples, &MAX_USED_BITS_V1); + TEST_ASSERT(size <= MAX_DATA_TO_COMPRESS_SIZE); + size = generate_random_collection(data_to_compress2, data_type, samples, &MAX_USED_BITS_V1); + TEST_ASSERT(size <= MAX_DATA_TO_COMPRESS_SIZE); + /* for (cmp_mode = CMP_MODE_RAW; cmp_mode <= CMP_MODE_STUFF; cmp_mode++) { */ for (cmp_mode = CMP_MODE_RAW; cmp_mode <= CMP_MODE_DIFF_MULTI; cmp_mode++) { /* printf("cmp_mode: %i\n", cmp_mode); */ @@ -610,11 +804,11 @@ void test_random_compression_decompression(void) compression_decompression(&cfg); } - free(data_to_compress1); - free(data_to_compress2); - free(updated_model); } compression_decompression(NULL); + free(data_to_compress1); + free(data_to_compress2); + free(updated_model); } @@ -664,3 +858,635 @@ void test_random_compression_decompression2(void) free(compressed_data); free(decompressed_data); } + +#if 0 +int icu_compress_chunk(void *chunk, size_t chunk_size, void *model, void *dst, + size_t dst_capacity); +void no_test_chunk(void) +{ + size_t s, i; + struct todo chunk_struct[3]; + uint8_t *buf; + uint8_t *dst; + + cmp_rand_seed(0); + chunk_struct[0].data_type = DATA_TYPE_F_FX; + chunk_struct[0].samples = 10; + + chunk_struct[1].data_type = DATA_TYPE_S_FX; + chunk_struct[1].samples = 3; + + chunk_struct[2].data_type = DATA_TYPE_SMEARING; + chunk_struct[2].samples = 4; + + s = generate_random_chunk(NULL, chunk_struct, ARRAY_SIZE(chunk_struct), &MAX_USED_BITS_V1); + printf("s: %zu\n", s); + + buf = malloc(s); + s = generate_random_chunk(buf, chunk_struct, ARRAY_SIZE(chunk_struct), &MAX_USED_BITS_V1); + printf("data to compress (s: %zu)\n", s); + for (i = 0; i < s; ++i) { + printf("%02X", buf[i]); + if ((i + 1) % 2 == 0) + printf("\n"); + } + + dst = malloc(s+1000); + s = icu_compress_chunk(buf, s, NULL, dst, s+1000); + printf("\n\ncompressed data (s: %zu)\n", s); + for (i = 0; i < s; ++i) { + printf("%02X", dst[i]); + if ((i + 1) % 2 == 0) + printf("\n"); + } + + free(dst); + free(buf); +} +#endif +#include <byteorder.h> + +size_t set_cmp_size(uint8_t *p, uint16_t v) +{ + v -= COLLECTION_HDR_SIZE; + memset(p, v >> 8, 1); + memset(p+1, v & 0xFF, 1); + return sizeof(uint16_t); +} + +uint16_t get_cmp_size(uint8_t *p) +{ + return ((uint16_t)p[0]<<8) + p[1]; +} +#if 0 +remove this +void no_test_new_format(void) +{ + uint32_t data_size = cmp_cal_size_of_data(3, DATA_TYPE_L_FX_EFX_NCOB_ECOB); + data_size += 
cmp_cal_size_of_data(2, DATA_TYPE_L_FX_EFX_NCOB_ECOB); + data_size += cmp_cal_size_of_data(5, DATA_TYPE_L_FX); + data_size += cmp_cal_size_of_data(2, DATA_TYPE_L_FX); + data_size += 3*sizeof(uint16_t); + + uint32_t ent_size = cmp_ent_create(NULL, DATA_TYPE_L_FX, 1, data_size); + void *ent = calloc(1, ent_size); + ent_size = cmp_ent_create(ent, DATA_TYPE_L_FX, 1, data_size); + + char *p = cmp_ent_get_data_buf(ent); + p +=2; + uint16_t s = generate_random_collection(p, DATA_TYPE_L_FX, 2, &MAX_USED_BITS_V1); + p += set_cmp_size(p-2, s); + p += s; + s = generate_random_collection(p, DATA_TYPE_L_FX, 5, &MAX_USED_BITS_V1); + p += set_cmp_size(p-2, s); + p += s; + s = generate_random_collection(p, DATA_TYPE_L_FX_EFX_NCOB_ECOB, 2, &MAX_USED_BITS_V1); + p += set_cmp_size(p-2, s); + p += s; + s = generate_random_collection(p, DATA_TYPE_L_FX_EFX_NCOB_ECOB, 2, &MAX_USED_BITS_V1); + p+=s; + + int num_of_coll =4; + + uint8_t *d_p = cmp_ent_get_data_buf(ent); + uint32_t sum =0; + s =0; + for (int c = 1; c <= num_of_coll ; c++) { + uint16_t cmp_col_size; + if (c == num_of_coll) + cmp_col_size = cmp_ent_get_cmp_data_size(ent)- sum; + else{ + cmp_col_size = get_cmp_size(&d_p[s]); + sum += cmp_col_size; + s+=2; + } + + uint16_t col_size = cmp_col_get_data_length(&d_p[s]); + printf("cmp_col_sizel: %X col_size: %X\n", cmp_col_size, col_size); + for (int i = s-2; i < s+col_size +COLLECTION_HDR_SIZE; ++i) { + printf("%02X ",((uint8_t *)d_p)[i]); + if ((i + 1) % 2 == 0) + printf("\n"); + } + TEST_ASSERT(cmp_col_size == col_size); + s+=col_size+COLLECTION_HDR_SIZE; + } + +} +#endif +#if 0 +/* #include "../../lib/common/byteorder.h" */ +void NOOO_test_cmp_collection_raw(void) +{ + struct collection_hdr *col = NULL; + uint32_t samples = 2; + size_t col_size, dst_capacity = 0; + struct s_fx *data; + uint32_t *dst = NULL; + int dst_capacity_used = 0; + struct cmp_par par = {0}; + const size_t exp_col_size = COLLECTION_HDR_SIZE+2*sizeof(struct s_fx); + const size_t exp_cmp_size_byte = exp_col_size; + + par.cmp_mode = CMP_MODE_RAW; + + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + col = malloc(col_size); + TEST_ASSERT_NOT_NULL(col); + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + + data = (struct s_fx *)col->entry; + data[0].exp_flags = 0; + data[0].fx = 0; + data[1].exp_flags = 0xF0; + data[1].fx = 0xABCDE0FF; + + + dst_capacity = (size_t)cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, dst_capacity); + dst = malloc(dst_capacity); + TEST_ASSERT_NOT_NULL(dst); + dst_capacity_used = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, dst_capacity_used); + + { + uint8_t *p = (uint8_t *)dst; + struct collection_hdr *raw_cmp_col = (struct collection_hdr *)p; + struct s_fx *raw_cmp_data = (void *)raw_cmp_col->entry; + + /* TEST_ASSERT_EQUAL_UINT(cpu_to_be16(2*sizeof(struct s_fx)), ((uint16_t *)p)[0]); */ + TEST_ASSERT(memcmp(col, raw_cmp_col, COLLECTION_HDR_SIZE) == 0); + TEST_ASSERT_EQUAL_HEX(data[0].exp_flags, raw_cmp_data[0].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[0].fx, be32_to_cpu(raw_cmp_data[0].fx)); + TEST_ASSERT_EQUAL_HEX(data[1].exp_flags, raw_cmp_data[1].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[1].fx, be32_to_cpu(raw_cmp_data[1].fx)); + } + + memset(dst, 0, dst_capacity); + + dst_capacity -= 1; + 
dst_capacity_used = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, dst_capacity_used); + + free(col); + free(dst); +} + + +void NOOO_test_cmp_collection_diff(void) +{ + struct collection_hdr *col = NULL; + uint32_t *dst = NULL; + size_t dst_capacity = 0; + int dst_capacity_used = 33; + struct cmp_par par = {0}; + const uint16_t cmp_size_byte_exp= 2; + + + { /* generate test data */ + struct s_fx *data; + uint32_t samples = 2; + size_t col_size; + const size_t exp_col_size = COLLECTION_HDR_SIZE+samples*sizeof(*data); + + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + col = malloc(col_size); TEST_ASSERT_NOT_NULL(col); + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + + data = (struct s_fx *)col->entry; + data[0].exp_flags = 0; + data[0].fx = 0; + data[1].exp_flags = 1; + data[1].fx = 1; + } + + { /* compress data */ + int cmp_size_byte; + const int exp_cmp_size_byte = dst_capacity_used + CMP_COLLECTION_FILD_SIZE + + COLLECTION_HDR_SIZE + cmp_size_byte_exp;; + + par.cmp_mode = CMP_MODE_DIFF_ZERO; + par.s_exp_flags = 1; + par.s_fx = 1; + + cmp_size_byte = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, cmp_size_byte); + dst_capacity = (size_t)ROUND_UP_TO_MULTIPLE_OF_4(cmp_size_byte); + dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst); + memset(dst, 0xFF, dst_capacity); + cmp_size_byte = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, cmp_size_byte); + } + + { /* check the compressed data */ + uint8_t *p = (uint8_t *)dst; + uint16_t cmp_collection_size_exp = cpu_to_be16(cmp_size_byte_exp); + + TEST_ASSERT_EACH_EQUAL_HEX8(0xFF, p, dst_capacity_used); + p += dst_capacity_used; + + TEST_ASSERT_EQUAL_HEX8_ARRAY(&cmp_collection_size_exp, p, CMP_COLLECTION_FILD_SIZE); + p += CMP_COLLECTION_FILD_SIZE; + + TEST_ASSERT(memcmp(col, p, COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + TEST_ASSERT_EQUAL_HEX8(0xAE, *p++); + TEST_ASSERT_EQUAL_HEX8(0xE0, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + + TEST_ASSERT_EQUAL_size_t(dst_capacity, p - (uint8_t *)dst); + } + + + /* error cases dst buffer to small */ + dst_capacity -= 1; + dst_capacity_used = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, dst_capacity_used); + + free(col); + free(dst); +} + + +void NOOO_test_cmp_collection_worst_case(void) +{ + struct collection_hdr *col = NULL; + uint32_t *dst = NULL; + size_t dst_capacity = 0; + int dst_capacity_used = 4; + struct cmp_par par = {0}; + const uint16_t cmp_size_byte_exp= 2*sizeof(struct s_fx); + int cmp_size_byte; + + { /* generate test data */ + struct s_fx *data; + uint32_t samples = 2; + size_t col_size; + const size_t exp_col_size = COLLECTION_HDR_SIZE+samples*sizeof(*data); + + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + col = malloc(col_size); TEST_ASSERT_NOT_NULL(col); + col_size = generate_random_collection(col, DATA_TYPE_S_FX, samples, &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL(exp_col_size, col_size); + + data = (struct s_fx *)col->entry; + data[0].exp_flags = 0x4; + 
data[0].fx = 0x0000000E; + data[1].exp_flags = 0x4; + data[1].fx = 0x00000016; +; + } + + { /* compress data */ + const int exp_cmp_size_byte = dst_capacity_used + CMP_COLLECTION_FILD_SIZE + + COLLECTION_HDR_SIZE + cmp_size_byte_exp;; + + par.cmp_mode = CMP_MODE_DIFF_ZERO; + par.s_exp_flags = 1; + par.s_fx = 1; + + cmp_size_byte = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, cmp_size_byte); + dst_capacity = 1000; + dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst); + memset(dst, 0xFF, dst_capacity); + cmp_size_byte = cmp_collection(col, NULL, dst, dst_capacity, &par, dst_capacity_used); + TEST_ASSERT_EQUAL_INT(exp_cmp_size_byte, cmp_size_byte); + } + + { /* check the compressed data */ + uint8_t *p = (uint8_t *)dst; + uint16_t cmp_collection_size_exp = cpu_to_be16(cmp_size_byte_exp); + + TEST_ASSERT_EACH_EQUAL_HEX8(0xFF, p, dst_capacity_used); + p += dst_capacity_used; + + TEST_ASSERT_EQUAL_HEX8_ARRAY(&cmp_collection_size_exp, p, CMP_COLLECTION_FILD_SIZE); + p += CMP_COLLECTION_FILD_SIZE; + + TEST_ASSERT(memcmp(col, p, COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + TEST_ASSERT_EQUAL_HEX8(0x04, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x0E, *p++); + TEST_ASSERT_EQUAL_HEX8(0x04, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x00, *p++); + TEST_ASSERT_EQUAL_HEX8(0x16, *p++); + + TEST_ASSERT_EQUAL_size_t(cmp_size_byte, p - (uint8_t *)dst); + } +} +#endif + + +void test_cmp_chunk_raw2(void) +{ + struct cmp_par par = {0}; + struct chunk_def chunk_def[2] = {{DATA_TYPE_S_FX, 2}, {DATA_TYPE_S_FX_EFX_NCOB_ECOB, 3}}; + size_t chunk_size; + size_t chunk_size_exp = 2*sizeof(struct s_fx) + 3*sizeof(struct s_fx_efx_ncob_ecob) + 2*COLLECTION_HDR_SIZE; + void *chunk = NULL; + uint32_t *dst = NULL; + int dst_capacity = 0; + + /* generate test data */ + chunk_size = generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(chunk_size_exp, chunk_size); + chunk = calloc(1, chunk_size); + TEST_ASSERT_NOT_NULL(chunk); + chunk_size = generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(chunk_size_exp, chunk_size); + + /* "compress" data */ + { + size_t cmp_size_byte_exp = GENERIC_HEADER_SIZE + chunk_size_exp; + + par.cmp_mode = CMP_MODE_RAW; + + dst_capacity = compress_chunk(chunk, chunk_size, NULL, NULL, dst, dst_capacity, &par); + TEST_ASSERT_EQUAL_INT(cmp_size_byte_exp, dst_capacity); + dst = calloc(1, dst_capacity); + TEST_ASSERT_NOT_NULL(dst); + dst_capacity = compress_chunk(chunk, chunk_size, NULL, NULL, dst, dst_capacity, &par); + TEST_ASSERT_EQUAL_INT(cmp_size_byte_exp, dst_capacity); + } + + /* check results */ + { + uint8_t *p = (uint8_t *)dst; + /* uint16_t cmp_collection_size_exp = cpu_to_be16(2*sizeof(struct s_fx)); */ + struct collection_hdr *col = (struct collection_hdr *)chunk; + struct s_fx *cmp_data_raw_1; + struct s_fx *data = (void *)col->entry; + int i; + + /* TODO: Check header */ + TEST_ASSERT_EQUAL_HEX(chunk_size, cmp_ent_get_original_size(dst)); + TEST_ASSERT_EQUAL_HEX(chunk_size+GENERIC_HEADER_SIZE, cmp_ent_get_size(dst)); + + p += GENERIC_HEADER_SIZE; + + + /* TEST_ASSERT(memcmp(p, &cmp_collection_size_exp, CMP_COLLECTION_FILD_SIZE) == 0); */ + /* p += CMP_COLLECTION_FILD_SIZE; */ + + TEST_ASSERT(memcmp(col, p, 
COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + cmp_data_raw_1 = (struct s_fx *)p; + TEST_ASSERT_EQUAL_HEX(data[0].exp_flags, cmp_data_raw_1[0].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[0].fx, be32_to_cpu(cmp_data_raw_1[0].fx)); + TEST_ASSERT_EQUAL_HEX(data[1].exp_flags, cmp_data_raw_1[1].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[1].fx, be32_to_cpu(cmp_data_raw_1[1].fx)); + p += 2*sizeof(struct s_fx); + + /* check 2nd collection */ + /* cmp_collection_size_exp = cpu_to_be16(3*sizeof(struct s_fx_efx_ncob_ecob)); */ + /* TEST_ASSERT(memcmp(p, &cmp_collection_size_exp, CMP_COLLECTION_FILD_SIZE) == 0); */ + /* p += CMP_COLLECTION_FILD_SIZE; */ + + col = (struct collection_hdr *) ((char *)col + cmp_col_get_size(col)); + TEST_ASSERT(memcmp(col, p, COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + for (i = 0; i < 3; i++) { + struct s_fx_efx_ncob_ecob *raw_cmp_data2 = (struct s_fx_efx_ncob_ecob *)p; + struct s_fx_efx_ncob_ecob *data2 = (struct s_fx_efx_ncob_ecob *)col->entry; + + TEST_ASSERT_EQUAL_HEX(data2[i].exp_flags, raw_cmp_data2[i].exp_flags); + TEST_ASSERT_EQUAL_HEX(data2[i].fx, be32_to_cpu(raw_cmp_data2[i].fx)); + TEST_ASSERT_EQUAL_HEX(data2[i].efx, be32_to_cpu(raw_cmp_data2[i].efx)); + TEST_ASSERT_EQUAL_HEX(data2[i].ncob_x, be32_to_cpu(raw_cmp_data2[i].ncob_x)); + TEST_ASSERT_EQUAL_HEX(data2[i].ncob_y, be32_to_cpu(raw_cmp_data2[i].ncob_y)); + TEST_ASSERT_EQUAL_HEX(data2[i].ecob_x, be32_to_cpu(raw_cmp_data2[i].ecob_x)); + TEST_ASSERT_EQUAL_HEX(data2[i].ecob_y, be32_to_cpu(raw_cmp_data2[i].ecob_y)); + } + } + { + void *decompressed_data = NULL; + int decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, decompressed_data); + TEST_ASSERT_EQUAL_size_t(chunk_size, decmp_size); + decompressed_data = malloc((size_t)decmp_size); TEST_ASSERT_NOT_NULL(decompressed_data); + decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, decompressed_data); + + TEST_ASSERT_EQUAL_INT(chunk_size, decmp_size); + TEST_ASSERT_EQUAL_HEX8_ARRAY(chunk, decompressed_data, chunk_size); + } + + /* error cases */ + memset(dst, 0, dst_capacity); + + /* buffer to small for compressed data */ + dst_capacity -=1 ; + dst_capacity = compress_chunk(chunk, chunk_size, NULL, NULL, dst, dst_capacity, &par); + TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, dst_capacity); + + free(chunk); +} + + +void test_cmp_decmp_chunk_worst_case(void) +{ + struct chunk_def chunk_def[2] = {{DATA_TYPE_S_FX, 2}, {DATA_TYPE_S_FX_EFX_NCOB_ECOB, 3}}; + uint32_t chunk_size; + enum {CHUNK_SIZE_EXP = 2*sizeof(struct s_fx) + 3*sizeof(struct s_fx_efx_ncob_ecob) + 2*COLLECTION_HDR_SIZE}; + void *chunk = NULL; + uint32_t dst[COMPRESS_CHUNK_BOUND(CHUNK_SIZE_EXP, ARRAY_SIZE(chunk_def))/sizeof(uint32_t)]; + int cmp_size_byte = 0; + struct cmp_par par = {0}; + + /* generate test data */ + chunk_size = (uint32_t)generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(CHUNK_SIZE_EXP, chunk_size); + chunk = calloc(1, chunk_size); + TEST_ASSERT_NOT_NULL(chunk); + chunk_size = (uint32_t)generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(CHUNK_SIZE_EXP, chunk_size); + + /* "compress" data */ + { + size_t cmp_size_byte_exp = NON_IMAGETTE_HEADER_SIZE + 2*CMP_COLLECTION_FILD_SIZE + CHUNK_SIZE_EXP; + + par.cmp_mode = CMP_MODE_DIFF_ZERO; + par.s_exp_flags = 1; + par.s_fx = 1; + par.s_efx = 1; + par.s_ncob = 1; + par.s_ecob = 1; + + + TEST_ASSERT_NOT_NULL(dst); + cmp_size_byte = compress_chunk(chunk, chunk_size, NULL, NULL, dst, 
sizeof(dst), &par); + TEST_ASSERT_EQUAL_INT(cmp_size_byte_exp, cmp_size_byte); + } + + /* check results */ + { + uint8_t *p = (uint8_t *)dst; + uint16_t cmp_collection_size_exp = cpu_to_be16(2*sizeof(struct s_fx)); + struct collection_hdr *col = (struct collection_hdr *)chunk; + struct s_fx *cmp_data_raw_1; + struct s_fx *data = (void *)col->entry; + int i; + + /* TODO: Check header */ + p += NON_IMAGETTE_HEADER_SIZE; + + TEST_ASSERT_EQUAL_HEX8_ARRAY(&cmp_collection_size_exp, p, CMP_COLLECTION_FILD_SIZE); + p += CMP_COLLECTION_FILD_SIZE; + + TEST_ASSERT(memcmp(col, p, COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + cmp_data_raw_1 = (struct s_fx *)p; + TEST_ASSERT_EQUAL_HEX(data[0].exp_flags, cmp_data_raw_1[0].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[0].fx, be32_to_cpu(cmp_data_raw_1[0].fx)); + TEST_ASSERT_EQUAL_HEX(data[1].exp_flags, cmp_data_raw_1[1].exp_flags); + TEST_ASSERT_EQUAL_HEX(data[1].fx, be32_to_cpu(cmp_data_raw_1[1].fx)); + p += 2*sizeof(struct s_fx); + + /* check 2nd collection */ + cmp_collection_size_exp = cpu_to_be16(3*sizeof(struct s_fx_efx_ncob_ecob)); + TEST_ASSERT(memcmp(p, &cmp_collection_size_exp, CMP_COLLECTION_FILD_SIZE) == 0); + p += CMP_COLLECTION_FILD_SIZE; + + col = (struct collection_hdr *) ((char *)col + cmp_col_get_size(col)); + TEST_ASSERT(memcmp(col, p, COLLECTION_HDR_SIZE) == 0); + p += COLLECTION_HDR_SIZE; + + for (i = 0; i < 3; i++) { + struct s_fx_efx_ncob_ecob *raw_cmp_data2 = (struct s_fx_efx_ncob_ecob *)p; + struct s_fx_efx_ncob_ecob *data2 = (struct s_fx_efx_ncob_ecob *)col->entry; + + TEST_ASSERT_EQUAL_HEX(data2[i].exp_flags, raw_cmp_data2[i].exp_flags); + TEST_ASSERT_EQUAL_HEX(data2[i].fx, be32_to_cpu(raw_cmp_data2[i].fx)); + TEST_ASSERT_EQUAL_HEX(data2[i].efx, be32_to_cpu(raw_cmp_data2[i].efx)); + TEST_ASSERT_EQUAL_HEX(data2[i].ncob_x, be32_to_cpu(raw_cmp_data2[i].ncob_x)); + TEST_ASSERT_EQUAL_HEX(data2[i].ncob_y, be32_to_cpu(raw_cmp_data2[i].ncob_y)); + TEST_ASSERT_EQUAL_HEX(data2[i].ecob_x, be32_to_cpu(raw_cmp_data2[i].ecob_x)); + TEST_ASSERT_EQUAL_HEX(data2[i].ecob_y, be32_to_cpu(raw_cmp_data2[i].ecob_y)); + } + } + { + void *decompressed_data = NULL; + int decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, decompressed_data); + TEST_ASSERT_EQUAL_size_t(chunk_size, decmp_size); + decompressed_data = malloc((size_t)decmp_size); TEST_ASSERT_NOT_NULL(decompressed_data); + decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, decompressed_data); + + TEST_ASSERT_EQUAL_INT(chunk_size, decmp_size); + TEST_ASSERT_EQUAL_HEX8_ARRAY(chunk, decompressed_data, chunk_size); + } + + /* error cases */ + memset(dst, 0, sizeof(dst)); + + /* buffer to small for compressed data */ + cmp_size_byte = compress_chunk(chunk, chunk_size, NULL, NULL, dst, chunk_size, &par); + TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, cmp_size_byte); + + free(chunk); +} + + +void test_cmp_decmp_diff(void) +{ + struct chunk_def chunk_def[2] = {{DATA_TYPE_S_FX, 2}, {DATA_TYPE_S_FX_EFX_NCOB_ECOB, 3}}; + size_t chunk_size; + void *chunk = NULL; + uint32_t *dst = NULL; + int dst_capacity = 0; + + /* generate test data */ + { + struct s_fx *col_data1; + struct s_fx_efx_ncob_ecob *col_data2; + struct collection_hdr *col; + size_t chunk_size_exp = 2*sizeof(struct s_fx) + 3*sizeof(struct s_fx_efx_ncob_ecob) + 2*COLLECTION_HDR_SIZE; + + chunk_size = generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(chunk_size_exp, chunk_size); + chunk = calloc(1, chunk_size); + TEST_ASSERT_NOT_NULL(chunk); + chunk_size = 
generate_random_chunk(chunk, chunk_def, ARRAY_SIZE(chunk_def), &MAX_USED_BITS_SAFE); + TEST_ASSERT_EQUAL_size_t(chunk_size_exp, chunk_size); + + col = (struct collection_hdr *)chunk; + col_data1 = (struct s_fx *)(col->entry); + col_data1[0].exp_flags = 0; + col_data1[0].fx = 0; + col_data1[1].exp_flags = 1; + col_data1[1].fx = 1; + + col = (struct collection_hdr *)((char *)col + cmp_col_get_size(col)); + col_data2 = (struct s_fx_efx_ncob_ecob *)(col->entry); + col_data2[0].exp_flags = 0; + col_data2[0].fx = 1; + col_data2[0].efx = 2; + col_data2[0].ncob_x = 0; + col_data2[0].ncob_y = 1; + col_data2[0].ecob_x = 3; + col_data2[0].ecob_y = 7; + col_data2[1].exp_flags = 1; + col_data2[1].fx = 1; + col_data2[1].efx = 1; + col_data2[1].ncob_x = 1; + col_data2[1].ncob_y = 2; + col_data2[1].ecob_x = 1; + col_data2[1].ecob_y = 1; + col_data2[2].exp_flags = 2; + col_data2[2].fx = 2; + col_data2[2].efx = 2; + col_data2[2].ncob_x = 2; + col_data2[2].ncob_y = 45; + col_data2[2].ecob_x = 2; + col_data2[2].ecob_y = 2; + } + + + /* compress data */ + { + struct cmp_par par = {0}; + + par.cmp_mode = CMP_MODE_DIFF_ZERO; + par.s_exp_flags = 1; + par.s_fx = 2; + par.s_efx = 3; + par.s_ncob = 4; + par.s_ecob = 5; + + + dst_capacity = compress_chunk(chunk, chunk_size, NULL, NULL, dst, dst_capacity, &par); + TEST_ASSERT_GREATER_THAN_INT(0, dst_capacity); + /* TODO: */ dst_capacity = ROUND_UP_TO_MULTIPLE_OF_4(dst_capacity); + dst = malloc(dst_capacity); + TEST_ASSERT_NOT_NULL(dst); + dst_capacity = compress_chunk(chunk, chunk_size, NULL, NULL, dst, dst_capacity, &par); + TEST_ASSERT_GREATER_THAN_INT(0, dst_capacity); + } + { + void *decompressed_data = NULL; + int decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, + decompressed_data); + TEST_ASSERT_EQUAL_size_t(chunk_size, decmp_size); + decompressed_data = malloc((size_t)decmp_size); TEST_ASSERT_NOT_NULL(decompressed_data); + decmp_size = decompress_cmp_entiy((void *)dst, NULL, NULL, + decompressed_data); + + TEST_ASSERT_EQUAL_INT(chunk_size, decmp_size); + TEST_ASSERT_EQUAL_HEX8_ARRAY(chunk, decompressed_data, chunk_size); + } +} diff --git a/test/cmp_entity/test_cmp_entity.c b/test/cmp_entity/test_cmp_entity.c index ec930e53260ae23d23086145f7422eff5d4fd956..5bd6b1ef9297218cee9275da673b76a4a9071c51 100644 --- a/test/cmp_entity/test_cmp_entity.c +++ b/test/cmp_entity/test_cmp_entity.c @@ -431,8 +431,8 @@ void test_cmp_ent_data_type(void) int raw_mode_flag, raw_mode_flag_read; uint8_t *entity_p = (uint8_t *)&ent; - /* test non raw_mode */ - raw_mode_flag = 0; + /* test raw_mode */ + raw_mode_flag = 1; data_type = DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE; error = cmp_ent_set_data_type(&ent, data_type, raw_mode_flag); TEST_ASSERT_FALSE(error); @@ -443,11 +443,11 @@ void test_cmp_ent_data_type(void) TEST_ASSERT_EQUAL(raw_mode_flag, raw_mode_flag_read); /* check the right position in the header */ - TEST_ASSERT_EQUAL_HEX(0, entity_p[22]); + TEST_ASSERT_EQUAL_HEX(0x80, entity_p[22]); TEST_ASSERT_EQUAL_HEX(21, entity_p[23]); - /* test raw_mode */ - raw_mode_flag = 1; + /* test non raw_mode */ + raw_mode_flag = 0; data_type = DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE; error = cmp_ent_set_data_type(&ent, data_type, raw_mode_flag); TEST_ASSERT_FALSE(error); @@ -458,9 +458,10 @@ void test_cmp_ent_data_type(void) TEST_ASSERT_EQUAL(raw_mode_flag, raw_mode_flag_read); /* check the right position in the header */ - TEST_ASSERT_EQUAL_HEX(0x80, entity_p[22]); + TEST_ASSERT_EQUAL_HEX(0, entity_p[22]); TEST_ASSERT_EQUAL_HEX(21, entity_p[23]); + /* error cases */ raw_mode_flag = 0; 
data_type = 0x8000; diff --git a/test/cmp_icu/test_cmp_icu.c b/test/cmp_icu/test_cmp_icu.c index c5aaed2ac2e003fbccace855e817217deee8aa74..adc12f04c5aa65d66b94274cc2c5b4b734777c5b 100644 --- a/test/cmp_icu/test_cmp_icu.c +++ b/test/cmp_icu/test_cmp_icu.c @@ -32,6 +32,7 @@ #include "../test_common/test_common.h" #include <cmp_icu.h> +#include <cmp_data_types.h> #include "../../lib/icu_compress/cmp_icu.c" /* this is a hack to test static functions */ @@ -1071,11 +1072,11 @@ void test_cmp_cfg_fx_cob(void) void test_cmp_cfg_aux(void) { struct cmp_cfg cfg; uint32_t cmp_par_mean = 2; - uint32_t spillover_mean = 2; - uint32_t cmp_par_variance = 2; - uint32_t spillover_variance = 2; - uint32_t cmp_par_pixels_error = 2; - uint32_t spillover_pixels_error = 2; + uint32_t spillover_mean = 3; + uint32_t cmp_par_variance = 4; + uint32_t spillover_variance = 5; + uint32_t cmp_par_pixels_error = 6; + uint32_t spillover_pixels_error = 7; int error; enum cmp_data_type data_type; @@ -1085,20 +1086,32 @@ void test_cmp_cfg_aux(void) error = cmp_cfg_aux(&cfg, cmp_par_mean, spillover_mean, cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); - if (data_type == DATA_TYPE_OFFSET || - data_type == DATA_TYPE_BACKGROUND || - data_type == DATA_TYPE_SMEARING || - data_type == DATA_TYPE_F_CAM_OFFSET || - data_type == DATA_TYPE_F_CAM_BACKGROUND - ) { + if (data_type == DATA_TYPE_OFFSET || data_type == DATA_TYPE_F_CAM_OFFSET) { + TEST_ASSERT_FALSE(error); + TEST_ASSERT_EQUAL_INT(data_type, cfg.data_type); + TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_offset_mean); + TEST_ASSERT_EQUAL_INT(3, cfg.spill_offset_mean); + TEST_ASSERT_EQUAL_INT(4, cfg.cmp_par_offset_variance); + TEST_ASSERT_EQUAL_INT(5, cfg.spill_offset_variance); + } else if (data_type == DATA_TYPE_BACKGROUND || + data_type == DATA_TYPE_F_CAM_BACKGROUND) { + TEST_ASSERT_FALSE(error); + TEST_ASSERT_EQUAL_INT(data_type, cfg.data_type); + TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_background_mean); + TEST_ASSERT_EQUAL_INT(3, cfg.spill_background_mean); + TEST_ASSERT_EQUAL_INT(4, cfg.cmp_par_background_variance); + TEST_ASSERT_EQUAL_INT(5, cfg.spill_background_variance); + TEST_ASSERT_EQUAL_INT(6, cfg.cmp_par_background_pixels_error); + TEST_ASSERT_EQUAL_INT(7, cfg.spill_background_pixels_error); + } else if (data_type == DATA_TYPE_SMEARING) { TEST_ASSERT_FALSE(error); TEST_ASSERT_EQUAL_INT(data_type, cfg.data_type); - TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(2, cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(2, cfg.spill_variance); - TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_pixels_error); - TEST_ASSERT_EQUAL_INT(2, cfg.spill_pixels_error); + TEST_ASSERT_EQUAL_INT(2, cfg.cmp_par_smearing_mean); + TEST_ASSERT_EQUAL_INT(3, cfg.spill_smearing_mean); + TEST_ASSERT_EQUAL_INT(4, cfg.cmp_par_smearing_variance); + TEST_ASSERT_EQUAL_INT(5, cfg.spill_smearing_variance); + TEST_ASSERT_EQUAL_INT(6, cfg.cmp_par_smearing_pixels_error); + TEST_ASSERT_EQUAL_INT(7, cfg.spill_smearing_pixels_error); } else { TEST_ASSERT_TRUE(error); } @@ -1124,10 +1137,10 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MIN_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(2, cfg.spill_variance); + 
TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_offset_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MIN_NON_IMA_GOLOMB_PAR), cfg.spill_offset_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_offset_variance); + TEST_ASSERT_EQUAL_INT(2, cfg.spill_offset_variance); /* This should fail */ cmp_par_mean = MIN_NON_IMA_GOLOMB_PAR-1; @@ -1137,7 +1150,7 @@ void test_cmp_cfg_aux(void) TEST_ASSERT_TRUE(error); - /* DATA_TYPE_BACKGROUND */ + /* DATA_TYPE_F_CAM_OFFSET */ cfg = cmp_cfg_icu_create(DATA_TYPE_F_CAM_OFFSET, CMP_MODE_DIFF_MULTI, 7, CMP_LOSSLESS); cmp_par_mean = MIN_NON_IMA_GOLOMB_PAR; spillover_mean = cmp_icu_max_spill(MIN_NON_IMA_GOLOMB_PAR); @@ -1150,10 +1163,10 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MIN_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(2, cfg.spill_variance); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_offset_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MIN_NON_IMA_GOLOMB_PAR), cfg.spill_offset_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_offset_variance); + TEST_ASSERT_EQUAL_INT(2, cfg.spill_offset_variance); /* This should fail */ cmp_par_variance = MIN_NON_IMA_GOLOMB_PAR-1; @@ -1175,12 +1188,12 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_variance); - TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_pixels_error); - TEST_ASSERT_EQUAL_INT(23, cfg.spill_pixels_error); + TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_background_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_variance); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_background_variance); + TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_background_pixels_error); + TEST_ASSERT_EQUAL_INT(23, cfg.spill_background_pixels_error); /* This should fail */ cmp_par_variance = MIN_NON_IMA_GOLOMB_PAR-1; @@ -1203,12 +1216,12 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_variance); - TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_pixels_error); - TEST_ASSERT_EQUAL_INT(23, cfg.spill_pixels_error); + TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_background_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_variance); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_background_variance); + TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_background_pixels_error); + TEST_ASSERT_EQUAL_INT(23, 
cfg.spill_background_pixels_error); /* This should fail */ cmp_par_variance = MIN_NON_IMA_GOLOMB_PAR-1; @@ -1231,12 +1244,12 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_variance); - TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_pixels_error); - TEST_ASSERT_EQUAL_INT(23, cfg.spill_pixels_error); + TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_background_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_background_variance); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_background_variance); + TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_background_pixels_error); + TEST_ASSERT_EQUAL_INT(23, cfg.spill_background_pixels_error); /* This should fail */ cmp_par_pixels_error = MIN_NON_IMA_GOLOMB_PAR-1; @@ -1259,12 +1272,12 @@ void test_cmp_cfg_aux(void) cmp_par_variance, spillover_variance, cmp_par_pixels_error, spillover_pixels_error); TEST_ASSERT_FALSE(error); - TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_mean); - TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_mean); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_variance); - TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_variance); - TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_pixels_error); - TEST_ASSERT_EQUAL_INT(23, cfg.spill_pixels_error); + TEST_ASSERT_EQUAL_INT(MAX_NON_IMA_GOLOMB_PAR, cfg.cmp_par_smearing_mean); + TEST_ASSERT_EQUAL_INT(cmp_icu_max_spill(MAX_NON_IMA_GOLOMB_PAR), cfg.spill_smearing_mean); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_GOLOMB_PAR, cfg.cmp_par_smearing_variance); + TEST_ASSERT_EQUAL_INT(MIN_NON_IMA_SPILL, cfg.spill_smearing_variance); + TEST_ASSERT_EQUAL_INT(42, cfg.cmp_par_smearing_pixels_error); + TEST_ASSERT_EQUAL_INT(23, cfg.spill_smearing_pixels_error); /* This should fail */ spillover_pixels_error = cmp_icu_max_spill(42)+1; @@ -1468,13 +1481,13 @@ void test_put_n_bits32(void) rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(1, rval); - TEST_ASSERT(testarray1[0] == 0x7fffffff); + TEST_ASSERT(testarray1[0] == cpu_to_be32(0x7fffffff)); /* left border, write 1 */ v = 1; n = 1; o = 0; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(1, rval); - TEST_ASSERT(testarray0[0] == 0x80000000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x80000000)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(1, rval); @@ -1484,12 +1497,12 @@ void test_put_n_bits32(void) v = 0xf0f0abcd; n = 32; o = 0; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 32); - TEST_ASSERT(testarray0[0] == 0xf0f0abcd); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0xf0f0abcd)); TEST_ASSERT(testarray0[1] == 0); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 32); - TEST_ASSERT(testarray1[0] == 0xf0f0abcd); + TEST_ASSERT(testarray1[0] == cpu_to_be32(0xf0f0abcd)); TEST_ASSERT(testarray1[1] == 0xffffffff); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1498,7 +1511,7 @@ void test_put_n_bits32(void) v = 3; n = 2; o = 29; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 
31); - TEST_ASSERT(testarray0[0] == 0x6); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x6)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT(testarray1[0] == 0xffffffff); @@ -1515,7 +1528,7 @@ void test_put_n_bits32(void) TEST_ASSERT(testarray0[0] == 0); rval = put_n_bits32(v, n, o, testarray1, l); - TEST_ASSERT(testarray1[0] == 0x07ffffff); + TEST_ASSERT(testarray1[0] == cpu_to_be32(0x07ffffff)); TEST_ASSERT_EQUAL_INT(rval, 5); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1524,7 +1537,7 @@ void test_put_n_bits32(void) v = 0x1f; n = 5; o = 0; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 5); - TEST_ASSERT(testarray0[0] == 0xf8000000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0xf8000000)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 5); @@ -1540,7 +1553,7 @@ void test_put_n_bits32(void) rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 12); - TEST_ASSERT(testarray1[0] == 0xfe0fffff); + TEST_ASSERT(testarray1[0] == cpu_to_be32(0xfe0fffff)); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1548,7 +1561,7 @@ void test_put_n_bits32(void) v = 0x1f; n = 5; o = 7; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 12); - TEST_ASSERT(testarray0[0] == 0x01f00000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x01f00000)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 12); @@ -1568,7 +1581,7 @@ void test_put_n_bits32(void) TEST_ASSERT_EQUAL_INT(rval, 96); TEST_ASSERT(testarray1[0] == 0xffffffff); TEST_ASSERT(testarray1[1] == 0xffffffff); - TEST_ASSERT(testarray1[2] == 0xffffffe0); + TEST_ASSERT(testarray1[2] == cpu_to_be32(0xffffffe0)); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1578,7 +1591,7 @@ void test_put_n_bits32(void) TEST_ASSERT_EQUAL_INT(rval, 96); TEST_ASSERT(testarray0[0] == 0); TEST_ASSERT(testarray0[1] == 0); - TEST_ASSERT(testarray0[2] == 0x0000001f); + TEST_ASSERT(testarray0[2] == cpu_to_be32(0x0000001f)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 96); @@ -1624,8 +1637,8 @@ void test_put_n_bits32(void) rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 67); TEST_ASSERT(testarray1[0] == 0xffffffff); - TEST_ASSERT(testarray1[1] == 0xfffffffc); - TEST_ASSERT(testarray1[2] == 0x1fffffff); + TEST_ASSERT(testarray1[1] == cpu_to_be32(0xfffffffc)); + TEST_ASSERT(testarray1[2] == cpu_to_be32(0x1fffffff)); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1634,8 +1647,8 @@ void test_put_n_bits32(void) rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 67); TEST_ASSERT(testarray0[0] == 0); - TEST_ASSERT(testarray0[1] == 3); - TEST_ASSERT(testarray0[2] == 0xe0000000); + TEST_ASSERT(testarray0[1] == cpu_to_be32(3)); + TEST_ASSERT(testarray0[2] == cpu_to_be32(0xe0000000)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 67); @@ -1654,8 +1667,8 @@ void test_put_n_bits32(void) rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 33); - TEST_ASSERT(testarray1[0] == 0x80000000); - TEST_ASSERT(testarray1[1] == 0x7fffffff); + TEST_ASSERT(testarray1[0] == cpu_to_be32(0x80000000)); + TEST_ASSERT(testarray1[1] == cpu_to_be32(0x7fffffff)); /* re-init input arrays after clobbering */ init_PB32_arrays(testarray0, testarray1); @@ -1663,8 +1676,8 @@ void test_put_n_bits32(void) v = 
0xffffffff; n = 32; o = 1; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(rval, 33); - TEST_ASSERT(testarray0[0] == 0x7fffffff); - TEST_ASSERT(testarray0[1] == 0x80000000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x7fffffff)); + TEST_ASSERT(testarray0[1] == cpu_to_be32(0x80000000)); rval = put_n_bits32(v, n, o, testarray1, l); TEST_ASSERT_EQUAL_INT(rval, 33); @@ -1694,7 +1707,7 @@ void test_put_n_bits32(void) v = 0x7f; n = 6; o = 10; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(16, rval); - TEST_ASSERT(testarray0[0] == 0x003f0000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x003f0000)); TEST_ASSERT(testarray0[1] == 0); rval = put_n_bits32(v, n, o, testarray1, l); @@ -1710,7 +1723,7 @@ void test_put_n_bits32(void) v = 0xffffffff; n = 6; o = 10; rval = put_n_bits32(v, n, o, testarray0, l); TEST_ASSERT_EQUAL_INT(16, rval); - TEST_ASSERT(testarray0[0] == 0x003f0000); + TEST_ASSERT(testarray0[0] == cpu_to_be32(0x003f0000)); TEST_ASSERT(testarray0[1] == 0); rval = put_n_bits32(v, n, o, testarray1, l); @@ -2016,41 +2029,41 @@ void test_encode_value_zero(void) data = 0; model = 0; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(2, stream_len); - TEST_ASSERT_EQUAL_HEX(0x80000000, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0x80000000, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); data = 5; model = 0; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(14, stream_len); - TEST_ASSERT_EQUAL_HEX(0xBFF80000, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xBFF80000, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); data = 2; model = 7; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(25, stream_len); - TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); /* zero escape mechanism */ data = 100; model = 42; /* (100-42)*2+1=117 -> cw 0 + 0x0000_0000_0000_0075 */ stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(58, stream_len); - TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00001D40, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00001D40, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); /* test overflow */ data = (uint32_t)INT32_MIN; model = 0; /* (INT32_MIN)*-2-1+1=0(overflow) -> cw 0 + 0x0000_0000_0000_0000 */ stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(91, stream_len); - TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00001D40, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xBFFBFF00, be32_to_cpu(bitstream[0])); + 
TEST_ASSERT_EQUAL_HEX(0x00001D40, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); /* small buffer error */ data = 23; model = 26; @@ -2070,33 +2083,33 @@ void test_encode_value_zero(void) data = 53; model = 38; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(32, stream_len); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[2])); /* lowest value with zero encoding */ data = 0; model = 16; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(39, stream_len); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x41FFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x41FFFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[2])); /* maximum positive value to encode */ data = 31; model = 0; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(46, stream_len); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x40FFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x40FFFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[2])); /* maximum negative value to encode */ data = 0; model = 32; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(53, stream_len); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x40FC07FF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFE, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x40FC07FF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[2])); /* small buffer error when creating the zero escape symbol*/ bitstream[0] = 0; @@ -2107,9 +2120,9 @@ void test_encode_value_zero(void) data = 31; model = 0; stream_len = encode_value_zero(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, stream_len); - TEST_ASSERT_EQUAL_HEX(0, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0, be32_to_cpu(bitstream[2])); } @@ -2138,53 +2151,53 @@ void test_encode_value_multi(void) data = 0; model = 0; stream_len = encode_value_multi(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(1, stream_len); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[3])); data = 0; model = 1; stream_len = encode_value_multi(data, model, stream_len, &setup); 
TEST_ASSERT_EQUAL_INT(3, stream_len); - TEST_ASSERT_EQUAL_HEX(0x40000000, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x40000000, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[3])); data = 1+23; model = 0+23; stream_len = encode_value_multi(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(6, stream_len); - TEST_ASSERT_EQUAL_HEX(0x58000000, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x58000000, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[3])); /* highest value without multi outlier encoding */ data = 0+42; model = 8+42; stream_len = encode_value_multi(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(22, stream_len); - TEST_ASSERT_EQUAL_HEX(0x5BFFF800, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x5BFFF800, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[3])); /* lowest value with multi outlier encoding */ data = 8+42; model = 0+42; stream_len = encode_value_multi(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(41, stream_len); - TEST_ASSERT_EQUAL_HEX(0x5BFFFBFF, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFC000000, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x5BFFFBFF, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0xFC000000, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0x00000000, be32_to_cpu(bitstream[3])); /* highest value with multi outlier encoding */ data = (uint32_t)INT32_MIN; model = 0; stream_len = encode_value_multi(data, model, stream_len, &setup); TEST_ASSERT_EQUAL_INT(105, stream_len); - TEST_ASSERT_EQUAL_HEX(0x5BFFFBFF, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFC7FFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFF7FFFFF, bitstream[2]); - TEST_ASSERT_EQUAL_HEX(0xF7800000, bitstream[3]); + TEST_ASSERT_EQUAL_HEX(0x5BFFFBFF, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0xFC7FFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFF7FFFFF, be32_to_cpu(bitstream[2])); + TEST_ASSERT_EQUAL_HEX(0xF7800000, be32_to_cpu(bitstream[3])); /* small buffer error */ data = 0; model = 38; @@ -2248,8 +2261,8 @@ void test_encode_value(void) cmp_size = encode_value(data, model, cmp_size, &setup); TEST_ASSERT_EQUAL_INT(96, cmp_size); TEST_ASSERT_EQUAL_HEX(0, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x7FFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0x7FFFFFFF, be32_to_cpu(bitstream[2])); TEST_ASSERT_EQUAL_HEX(0, bitstream[3]); setup.lossy_par = 2; @@ 
-2258,7 +2271,7 @@ void test_encode_value(void) TEST_ASSERT_EQUAL_INT(128, cmp_size); TEST_ASSERT_EQUAL_HEX(0, bitstream[0]); TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0x7FFFFFFF, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0x7FFFFFFF, be32_to_cpu(bitstream[2])); TEST_ASSERT_EQUAL_HEX(0x00000000, bitstream[3]); /* small buffer error bitstream can not hold more data*/ @@ -2288,8 +2301,8 @@ void test_encode_value(void) data = 0x7FFFFFFF; model = 0; cmp_size = encode_value(data, model, cmp_size, &setup); TEST_ASSERT_EQUAL_INT(62, cmp_size); - TEST_ASSERT_EQUAL_HEX(0x00000001, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFC, bitstream[1]); + TEST_ASSERT_EQUAL_HEX(0x00000001, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFC, be32_to_cpu(bitstream[1])); TEST_ASSERT_EQUAL_HEX(0, bitstream[2]); TEST_ASSERT_EQUAL_HEX(0, bitstream[3]); @@ -2298,9 +2311,9 @@ void test_encode_value(void) data = UINT32_MAX; model = UINT32_MAX; cmp_size = encode_value(data, model, cmp_size, &setup); TEST_ASSERT_EQUAL_INT(93, cmp_size); - TEST_ASSERT_EQUAL_HEX(0x00000001, bitstream[0]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, bitstream[1]); - TEST_ASSERT_EQUAL_HEX(0xFFFFFFF8, bitstream[2]); + TEST_ASSERT_EQUAL_HEX(0x00000001, be32_to_cpu(bitstream[0])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFFF, be32_to_cpu(bitstream[1])); + TEST_ASSERT_EQUAL_HEX(0xFFFFFFF8, be32_to_cpu(bitstream[2])); TEST_ASSERT_EQUAL_HEX(0, bitstream[3]); /* data are bigger than max_data_bits */ @@ -2677,12 +2690,12 @@ void test_compress_imagette_error_cases(void) TEST_ASSERT_EQUAL_INT(-1, cmp_size); } - +#if 0 /** * @test compress_multi_entry_hdr */ -void test_compress_multi_entry_hdr(void) +void no_test_compress_multi_entry_hdr(void) { int stream_len; uint8_t data[COLLECTION_HDR_SIZE]; @@ -2759,6 +2772,7 @@ void test_compress_multi_entry_hdr(void) TEST_ASSERT_EQUAL(model_p-model, COLLECTION_HDR_SIZE); TEST_ASSERT_EQUAL(up_model_p-up_model, COLLECTION_HDR_SIZE); } +#endif void test_compress_s_fx_raw(void) @@ -4258,7 +4272,7 @@ void test_pad_bitstream(void) memset(cmp_data, 0xFF, sizeof(cmp_data)); cfg.icu_output_buf = cmp_data; cfg.data_type = DATA_TYPE_IMAGETTE; /* 16 bit samples */ - cfg.buffer_length = 6; /* 6 * 16 bit samples -> 3 * 32 bit */ + cfg.buffer_length = sizeof(cmp_data); /* 6 * 16 bit samples -> 3 * 32 bit */ /* test negative cmp_size */ cmp_size = -1; @@ -4308,7 +4322,7 @@ void test_pad_bitstream(void) /* error case the rest of the compressed data are to small dor a 32 bit * access */ - cfg.buffer_length = 5; + cfg.buffer_length -= 1; cmp_size = 64; cmp_size = put_n_bits32(0, 1, cmp_size, cfg.icu_output_buf, MAX_BIT_LEN); cmp_size_return = pad_bitstream(&cfg, cmp_size); @@ -4317,10 +4331,328 @@ void test_pad_bitstream(void) /** - * @test cmp_data_to_big_endian + * @test compress_chunk */ -void test_cmp_data_to_big_endian_error_cases(void) +void test_compress_chunk_raw_singel_col(void) +{ + enum { DATA_SIZE = 2*sizeof(struct s_fx), + CHUNK_SIZE = COLLECTION_HDR_SIZE + DATA_SIZE + }; + uint8_t chunk[CHUNK_SIZE]; + struct collection_hdr *col = (struct collection_hdr *)chunk; + struct s_fx *data = (struct s_fx *)col->entry; + struct cmp_par cmp_par = {0}; + uint32_t *dst; + int cmp_size; + size_t dst_capacity = 43; /* random non zero value */ + + /* create a chunk with a single collection */ + memset(col, 0, COLLECTION_HDR_SIZE); + TEST_ASSERT_FALSE(cmp_col_set_subservice(col, SST_NCxx_S_SCIENCE_S_FX)); + TEST_ASSERT_FALSE(cmp_col_set_data_length(col, DATA_SIZE)); + data[0].exp_flags = 0; + data[0].fx = 1; + 
+	data[1].exp_flags = 0xF0;
+	data[1].fx = 0xABCDE0FF;
+
+
+	/* compress the data */
+	cmp_par.cmp_mode = CMP_MODE_RAW;
+	dst = NULL;
+
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(GENERIC_HEADER_SIZE + CHUNK_SIZE, cmp_size);
+	dst_capacity = (size_t)cmp_size;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(GENERIC_HEADER_SIZE + CHUNK_SIZE, cmp_size);
+
+	/* test results */
+	{ struct cmp_entity *ent = (struct cmp_entity *)dst;
+		struct s_fx *raw_cmp_data = (struct s_fx *)(
+			(uint8_t *)cmp_ent_get_data_buf(ent) + COLLECTION_HDR_SIZE);
+
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_cmp_data_size(ent));
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_original_size(ent));
+		TEST_ASSERT_EQUAL_UINT(cmp_par.cmp_mode, cmp_ent_get_cmp_mode(ent));
+		TEST_ASSERT_TRUE(cmp_ent_get_data_type_raw_bit(ent));
+		TEST_ASSERT_EQUAL_INT(DATA_TYPE_CHUNK, cmp_ent_get_data_type(ent));
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col, cmp_ent_get_data_buf(ent), COLLECTION_HDR_SIZE);
+
+		TEST_ASSERT_EQUAL_HEX(data[0].exp_flags, raw_cmp_data[0].exp_flags);
+		TEST_ASSERT_EQUAL_HEX(data[0].fx, be32_to_cpu(raw_cmp_data[0].fx));
+		TEST_ASSERT_EQUAL_HEX(data[1].exp_flags, raw_cmp_data[1].exp_flags);
+		TEST_ASSERT_EQUAL_HEX(data[1].fx, be32_to_cpu(raw_cmp_data[1].fx));
+	}
+	free(dst);
+
+	/* error case: dst buffer too small */
+	dst_capacity -= 1;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, cmp_size);
+	free(dst);
+}
+
+
+void test_compress_chunk_raw_two_col(void)
+{
+	enum { DATA_SIZE_1 = 2*sizeof(struct s_fx),
+	       DATA_SIZE_2 = 3*sizeof(struct s_fx_efx_ncob_ecob),
+	       CHUNK_SIZE = 2*COLLECTION_HDR_SIZE + DATA_SIZE_1 + DATA_SIZE_2
+	};
+	uint8_t chunk[CHUNK_SIZE];
+	struct collection_hdr *col1 = (struct collection_hdr *)chunk;
+	struct collection_hdr *col2;
+	struct s_fx *data1 = (struct s_fx *)col1->entry;
+	struct s_fx_efx_ncob_ecob *data2;
+	struct cmp_par cmp_par = {0};
+	uint32_t *dst;
+	int cmp_size;
+	size_t dst_capacity = 0;
+
+	/* create a chunk with two collections */
+	memset(col1, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col1, SST_NCxx_S_SCIENCE_S_FX));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col1, DATA_SIZE_1));
+	data1[0].exp_flags = 0;
+	data1[0].fx = 1;
+	data1[1].exp_flags = 0xF0;
+	data1[1].fx = 0xABCDE0FF;
+	col2 = (struct collection_hdr *)(chunk + COLLECTION_HDR_SIZE + DATA_SIZE_1);
+	memset(col2, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col2, SST_NCxx_S_SCIENCE_S_FX_EFX_NCOB_ECOB));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col2, DATA_SIZE_2));
+	data2 = (struct s_fx_efx_ncob_ecob *)col2->entry;
+	data2[0].exp_flags = 1;
+	data2[0].fx = 2;
+	data2[0].efx = 3;
+	data2[0].ncob_x = 4;
+	data2[0].ncob_y = 5;
+	data2[0].ecob_x = 6;
+	data2[0].ecob_y = 7;
+	data2[1].exp_flags = 0;
+	data2[1].fx = 0;
+	data2[1].efx = 0;
+	data2[1].ncob_x = 0;
+	data2[1].ncob_y = 0;
+	data2[1].ecob_x = 0;
+	data2[1].ecob_y = 0;
+	data2[2].exp_flags = 0xF;
+	data2[2].fx = ~0U;
+	data2[2].efx = ~0U;
+	data2[2].ncob_x = ~0U;
+	data2[2].ncob_y = ~0U;
+	data2[2].ecob_x = ~0U;
+	data2[2].ecob_y = ~0U;
+
+	/* compress the data */
+	cmp_par.cmp_mode = CMP_MODE_RAW;
+	dst = NULL;
+
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(GENERIC_HEADER_SIZE + CHUNK_SIZE, cmp_size);
+	dst_capacity = (size_t)cmp_size;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(GENERIC_HEADER_SIZE + CHUNK_SIZE, cmp_size);
+
+	/* test results */
+	{ struct cmp_entity *ent = (struct cmp_entity *)dst;
+		struct s_fx *raw_cmp_data1 = (struct s_fx *)(
+			(uint8_t *)cmp_ent_get_data_buf(ent) + COLLECTION_HDR_SIZE);
+		struct s_fx_efx_ncob_ecob *raw_cmp_data2 = (struct s_fx_efx_ncob_ecob *)(
+			(uint8_t *)cmp_ent_get_data_buf(ent) + 2*COLLECTION_HDR_SIZE +
+			DATA_SIZE_1);
+		int i;
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_cmp_data_size(ent));
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_original_size(ent));
+		TEST_ASSERT_EQUAL_UINT(cmp_par.cmp_mode, cmp_ent_get_cmp_mode(ent));
+		TEST_ASSERT_TRUE(cmp_ent_get_data_type_raw_bit(ent));
+		TEST_ASSERT_EQUAL_INT(DATA_TYPE_CHUNK, cmp_ent_get_data_type(ent));
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col1, cmp_ent_get_data_buf(ent), COLLECTION_HDR_SIZE);
+
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data1[i].exp_flags, raw_cmp_data1[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data1[i].fx, be32_to_cpu(raw_cmp_data1[i].fx));
+		}
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col1, cmp_ent_get_data_buf(ent), COLLECTION_HDR_SIZE);
+
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data1[i].exp_flags, raw_cmp_data1[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data1[i].fx, be32_to_cpu(raw_cmp_data1[i].fx));
+		}
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col2, (uint8_t *)cmp_ent_get_data_buf(ent)+cmp_col_get_size(col1), COLLECTION_HDR_SIZE);
+
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data2[i].exp_flags, raw_cmp_data2[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data2[i].fx, be32_to_cpu(raw_cmp_data2[i].fx));
+			TEST_ASSERT_EQUAL_HEX(data2[i].efx, be32_to_cpu(raw_cmp_data2[i].efx));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ncob_x, be32_to_cpu(raw_cmp_data2[i].ncob_x));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ncob_y, be32_to_cpu(raw_cmp_data2[i].ncob_y));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ecob_x, be32_to_cpu(raw_cmp_data2[i].ecob_x));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ecob_y, be32_to_cpu(raw_cmp_data2[i].ecob_y));
+		}
+	}
+	free(dst);
+
+	/* error case: dst buffer too small */
+	dst_capacity -= 1;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, cmp_size);
+	free(dst);
+}
+
+void NOOO_test_compress_chunk_model(void)
+{
+	enum { DATA_SIZE_1 = 1*sizeof(struct background),
+	       DATA_SIZE_2 = 2*sizeof(struct offset),
+	       CHUNK_SIZE = 2*COLLECTION_HDR_SIZE + DATA_SIZE_1 + DATA_SIZE_2
+	};
+	uint8_t chunk[CHUNK_SIZE];
+	uint8_t chunk_model[CHUNK_SIZE];
+	uint8_t chunk_up_model[CHUNK_SIZE];
+	struct collection_hdr *col1 = (struct collection_hdr *)chunk;
+	struct collection_hdr *col2;
+	struct background *data1 = (struct background *)col1->entry;
+	struct offset *data2;
+	struct cmp_par cmp_par = {0};
+	uint32_t *dst;
+	int cmp_size;
+	size_t dst_capacity = 0;
+
+	/* create a chunk with two collections */
+	memset(col1, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col1, SST_NCxx_S_SCIENCE_BACKGROUND));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col1, DATA_SIZE_1));
+	data1[0].mean = 0;
+	data1[0].variance = 1;
+	data1[0].outlier_pixels = 0xF0;
+	col2 = (struct collection_hdr *)(chunk + COLLECTION_HDR_SIZE + DATA_SIZE_1);
+	memset(col2, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col2, SST_NCxx_S_SCIENCE_OFFSET));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col2, DATA_SIZE_2));
+	data2 = (struct offset *)col2->entry;
+	data2[0].mean = 1;
+	data2[0].variance = 2;
+	data2[1].mean = 3;
+	data2[1].variance = 4;
+
+	/* create a model with two collections */
+	col1 = (struct collection_hdr *)chunk_model;
+	memset(col1, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col1, SST_NCxx_S_SCIENCE_BACKGROUND));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col1, DATA_SIZE_1));
+	data1[0].mean = 1;
+	data1[0].variance = 2;
+	data1[0].outlier_pixels = 0xFFFF;
+	col2 = (struct collection_hdr *)(chunk + COLLECTION_HDR_SIZE + DATA_SIZE_1);
+	memset(col2, 0, COLLECTION_HDR_SIZE);
+	TEST_ASSERT_FALSE(cmp_col_set_subservice(col2, SST_NCxx_S_SCIENCE_OFFSET));
+	TEST_ASSERT_FALSE(cmp_col_set_data_length(col2, DATA_SIZE_2));
+	data2 = (struct offset *)col2->entry;
+	data2[0].mean = 0;
+	data2[0].variance = 0;
+	data2[1].mean = 0;
+	data2[1].variance = 0xEFFFFFFF;
+
+	/* compress the data */
+	cmp_par.cmp_mode = CMP_MODE_MODEL_ZERO;
+	cmp_par.model_value = 14;
+	cmp_par.nc_offset_mean = 1;
+	cmp_par.nc_offset_variance = 2;
+	cmp_par.nc_background_mean = 3;
+	cmp_par.nc_background_variance = 4;
+	cmp_par.nc_background_outlier_pixels = 5;
+	dst = NULL;
+
+	uint32_t chunk_size = COLLECTION_HDR_SIZE + DATA_SIZE_1;
+	/* chunk_size = CHUNK_SIZE; */
+	/* int */
+
+	cmp_size = compress_chunk(chunk, chunk_size, chunk_model, chunk_up_model, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(NON_IMAGETTE_HEADER_SIZE + COLLECTION_HDR_SIZE + 4, cmp_size);
+	dst_capacity = (size_t)cmp_size;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(GENERIC_HEADER_SIZE + CHUNK_SIZE, cmp_size);
+
+	/* test results */
+	{ struct cmp_entity *ent = (struct cmp_entity *)dst;
+		struct s_fx *raw_cmp_data1 = (struct s_fx *)(
+			(uint8_t *)cmp_ent_get_data_buf(ent) + COLLECTION_HDR_SIZE);
+		struct s_fx_efx_ncob_ecob *raw_cmp_data2 = (struct s_fx_efx_ncob_ecob *)(
+			(uint8_t *)cmp_ent_get_data_buf(ent) + 2*COLLECTION_HDR_SIZE +
+			DATA_SIZE_1);
+		int i;
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_cmp_data_size(ent));
+		TEST_ASSERT_EQUAL_UINT(CHUNK_SIZE, cmp_ent_get_original_size(ent));
+		TEST_ASSERT_EQUAL_UINT(cmp_par.cmp_mode, cmp_ent_get_cmp_mode(ent));
+		TEST_ASSERT_TRUE(cmp_ent_get_data_type_raw_bit(ent));
+		TEST_ASSERT_EQUAL_INT(DATA_TYPE_CHUNK, cmp_ent_get_data_type(ent));
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col1, cmp_ent_get_data_buf(ent), COLLECTION_HDR_SIZE);
+
+#if 0
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data1[i].exp_flags, raw_cmp_data1[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data1[i].fx, be32_to_cpu(raw_cmp_data1[i].fx));
+		}
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col1, cmp_ent_get_data_buf(ent), COLLECTION_HDR_SIZE);
+
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data1[i].exp_flags, raw_cmp_data1[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data1[i].fx, be32_to_cpu(raw_cmp_data1[i].fx));
+		}
+
+		TEST_ASSERT_EQUAL_HEX8_ARRAY(col2, (uint8_t *)cmp_ent_get_data_buf(ent)+cmp_col_get_size(col1), COLLECTION_HDR_SIZE);
+
+		for (i = 0; i < 2; i++) {
+			TEST_ASSERT_EQUAL_HEX(data2[i].exp_flags, raw_cmp_data2[i].exp_flags);
+			TEST_ASSERT_EQUAL_HEX(data2[i].fx, be32_to_cpu(raw_cmp_data2[i].fx));
+			TEST_ASSERT_EQUAL_HEX(data2[i].efx, be32_to_cpu(raw_cmp_data2[i].efx));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ncob_x, be32_to_cpu(raw_cmp_data2[i].ncob_x));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ncob_y, be32_to_cpu(raw_cmp_data2[i].ncob_y));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ecob_x, be32_to_cpu(raw_cmp_data2[i].ecob_x));
+			TEST_ASSERT_EQUAL_HEX(data2[i].ecob_y, be32_to_cpu(raw_cmp_data2[i].ecob_y));
+		}
+#endif
+	}
+	free(dst);
+
+	/* error case: dst buffer too small */
+	dst_capacity -= 1;
+	dst = malloc(dst_capacity); TEST_ASSERT_NOT_NULL(dst);
+	cmp_size = compress_chunk(chunk, CHUNK_SIZE, NULL, NULL, dst,
+				  dst_capacity, &cmp_par);
+	TEST_ASSERT_EQUAL_INT(CMP_ERROR_SMALL_BUF, cmp_size);
+	free(dst);
+}
+/* TODO: chunk tests
+ * collection with 0 length;
+ * collection with wrong mix of collections;
+ */
+
+/**
+ * @test cmp_data_to_big_endian
+ */
+#if 0
+void notest_cmp_data_to_big_endian_error_cases(void)
 {
 	struct cmp_cfg cfg = {0};
 	int cmp_size;
@@ -4374,6 +4706,7 @@ void test_cmp_data_to_big_endian_error_cases(void)
 	cmp_size_return = cmp_data_to_big_endian(&cfg, cmp_size);
 	TEST_ASSERT_EQUAL_INT(cmp_size_return, -1);
 }
+#endif


 /**
diff --git a/test/decmp/test_decmp.c b/test/decmp/test_decmp.c
index 3efa7c61c33240c3113ec21b6490f267fe030468..8e9fb8720c4d666388e219fe08932d618951d5c2 100644
--- a/test/decmp/test_decmp.c
+++ b/test/decmp/test_decmp.c
@@ -1294,10 +1294,10 @@ void test_cmp_ent_write_cmp_pars(void)
 	cfg.max_used_bits = cmp_max_used_bits_list_get(42);

 	/* create a compression entity */
-	size = cmp_ent_create(NULL, cfg.data_type, cfg.cmp_mode == CMP_MODE_RAW, 12);
+	size = cmp_ent_create(NULL, cfg.data_type, cfg.cmp_mode == CMP_MODE_RAW, cmp_cal_size_of_data(cfg.samples, cfg.data_type));
 	TEST_ASSERT_NOT_EQUAL_INT(0, size);
 	ent = malloc(size); TEST_ASSERT_NOT_NULL(ent);
-	size = cmp_ent_create(ent, cfg.data_type, cfg.cmp_mode == CMP_MODE_RAW, 12);
+	size = cmp_ent_create(ent, cfg.data_type, cfg.cmp_mode == CMP_MODE_RAW, cmp_cal_size_of_data(cfg.samples, cfg.data_type));
 	TEST_ASSERT_NOT_EQUAL_INT(0, size);

 	error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits);
@@ -1305,7 +1305,7 @@ void test_cmp_ent_write_cmp_pars(void)

 	TEST_ASSERT_EQUAL_INT(cfg.data_type, cmp_ent_get_data_type(ent));
 	TEST_ASSERT_EQUAL_INT(1, cmp_ent_get_data_type_raw_bit(ent));
-	TEST_ASSERT_EQUAL_INT(12, cmp_ent_get_cmp_data_size(ent));
+	TEST_ASSERT_EQUAL_INT(cmp_cal_size_of_data(cfg.samples, cfg.data_type), cmp_ent_get_cmp_data_size(ent));

 	TEST_ASSERT_EQUAL_INT(cmp_cal_size_of_data(cfg.samples, cfg.data_type), cmp_ent_get_original_size(ent));
 	TEST_ASSERT_EQUAL_INT(cfg.cmp_mode, cmp_ent_get_cmp_mode(ent));
@@ -1316,7 +1316,7 @@ void test_cmp_ent_write_cmp_pars(void)
 	error = cmp_ent_read_header(ent, &cfg_read);
 	TEST_ASSERT_FALSE(error);
 	cfg.icu_output_buf = cmp_ent_get_data_buf(ent); /* quick fix that both cfg are equal */
-	cfg.buffer_length = 12; /* quick fix that both cfg are equal */
+	cfg.buffer_length = 18; /* quick fix that both cfg are equal */
 	TEST_ASSERT_EQUAL_MEMORY(&cfg, &cfg_read, sizeof(struct cmp_cfg));

 	free(ent);
@@ -1493,12 +1493,12 @@ void test_cmp_ent_write_cmp_pars(void)
 	cfg.model_value = 11;
 	cfg.round = 2;
 	cfg.samples = 9;
-	cfg.spill_mean = 1;
-	cfg.spill_variance = 2;
-	cfg.spill_pixels_error = 3;
-	cfg.cmp_par_mean = 7;
-	cfg.cmp_par_variance = 8;
-	cfg.cmp_par_pixels_error = 9;
+	cfg.spill_smearing_mean = 1;
+	cfg.spill_smearing_variance = 2;
+	cfg.spill_smearing_pixels_error = 3;
+	cfg.cmp_par_smearing_mean = 7;
+	cfg.cmp_par_smearing_variance = 8;
+	cfg.cmp_par_smearing_pixels_error = 9;
 	cfg.max_used_bits = cmp_max_used_bits_list_get(42);

 	/* create a compression
entity */ @@ -1522,18 +1522,18 @@ void test_cmp_ent_write_cmp_pars(void) TEST_ASSERT_EQUAL_INT(cfg.round, cmp_ent_get_lossy_cmp_par(ent)); - TEST_ASSERT_EQUAL_INT(cfg.spill_mean, cmp_ent_get_non_ima_spill1(ent)); - TEST_ASSERT_EQUAL_INT(cfg.spill_variance, cmp_ent_get_non_ima_spill2(ent)); - TEST_ASSERT_EQUAL_INT(cfg.spill_pixels_error, cmp_ent_get_non_ima_spill3(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill4(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill5(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill6(ent)); - TEST_ASSERT_EQUAL_INT(cfg.cmp_par_mean, cmp_ent_get_non_ima_cmp_par1(ent)); - TEST_ASSERT_EQUAL_INT(cfg.cmp_par_variance, cmp_ent_get_non_ima_cmp_par2(ent)); - TEST_ASSERT_EQUAL_INT(cfg.cmp_par_pixels_error, cmp_ent_get_non_ima_cmp_par3(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par4(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par5(ent)); - TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par6(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill1(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill2(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_spill3(ent)); + TEST_ASSERT_EQUAL_INT(cfg.spill_smearing_mean, cmp_ent_get_non_ima_spill4(ent)); + TEST_ASSERT_EQUAL_INT(cfg.spill_smearing_variance, cmp_ent_get_non_ima_spill5(ent)); + TEST_ASSERT_EQUAL_INT(cfg.spill_smearing_pixels_error, cmp_ent_get_non_ima_spill6(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par1(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par2(ent)); + TEST_ASSERT_EQUAL_INT(0, cmp_ent_get_non_ima_cmp_par3(ent)); + TEST_ASSERT_EQUAL_INT(cfg.cmp_par_smearing_mean, cmp_ent_get_non_ima_cmp_par4(ent)); + TEST_ASSERT_EQUAL_INT(cfg.cmp_par_smearing_variance, cmp_ent_get_non_ima_cmp_par5(ent)); + TEST_ASSERT_EQUAL_INT(cfg.cmp_par_smearing_pixels_error, cmp_ent_get_non_ima_cmp_par6(ent)); error = cmp_ent_read_header(ent, &cfg_read); TEST_ASSERT_FALSE(error); @@ -1669,7 +1669,7 @@ void test_cmp_ent_write_cmp_pars(void) free(ent); /* create a compression entity */ - cfg.data_type = DATA_TYPE_OFFSET; + cfg.data_type = DATA_TYPE_F_CAM_BACKGROUND; cfg.samples = 9; size = cmp_ent_create(NULL, cfg.data_type, cfg.cmp_mode == CMP_MODE_RAW, 12); TEST_ASSERT_NOT_EQUAL_INT(0, size); @@ -1678,40 +1678,40 @@ void test_cmp_ent_write_cmp_pars(void) TEST_ASSERT_NOT_EQUAL_INT(0, size); /* mean cmp_par to high */ - cfg.cmp_par_mean = 0x10000; + cfg.cmp_par_background_mean = 0x10000; error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.cmp_par_mean = 0xFFFF; + cfg.cmp_par_background_mean = 0xFFFF; /* mean spill to high */ - cfg.spill_mean = 0x1000000; + cfg.spill_background_mean = 0x1000000; error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.spill_mean = 0xFFFFFF; + cfg.spill_background_mean = 0xFFFFFF; /* variance cmp_par to high */ - cfg.cmp_par_variance = 0x10000; + cfg.cmp_par_background_variance = 0x10000; error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.cmp_par_variance = 0xFFFF; + cfg.cmp_par_background_variance = 0xFFFF; /* variance spill to high */ - cfg.spill_variance = 0x1000000; + cfg.spill_background_variance = 0x1000000; error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.spill_variance = 0xFFFFFF; + cfg.spill_background_variance = 0xFFFFFF; /* pixels_error cmp_par to high */ - cfg.cmp_par_pixels_error = 0x10000; + cfg.cmp_par_background_pixels_error = 0x10000; error = 
cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.cmp_par_pixels_error = 0xFFFF; + cfg.cmp_par_background_pixels_error = 0xFFFF; /* pixels_error spill to high */ - cfg.spill_pixels_error = 0x1000000; + cfg.spill_background_pixels_error = 0x1000000; error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); TEST_ASSERT_TRUE(error); - cfg.spill_pixels_error = 0xFFFFFF; + cfg.spill_background_pixels_error = 0xFFFFFF; cmp_ent_set_data_type(ent, DATA_TYPE_F_FX_EFX_NCOB_ECOB, 0); @@ -1815,13 +1815,17 @@ void test_cmp_ent_read_header_error_cases(void) uint32_t size; struct cmp_entity *ent; struct cmp_cfg cfg; + int cmp_size_bits = 10*8; - /* create a entity */ - size = cmp_ent_create(NULL, DATA_TYPE_IMAGETTE, 1, 10); + /* create a imagette entity */ + size = cmp_ent_create(NULL, DATA_TYPE_IMAGETTE, 0, 10); + /* created size smaller than max entity size -> returns max entity size */ TEST_ASSERT_EQUAL_UINT32(sizeof(struct cmp_entity), size); ent = malloc(size); TEST_ASSERT_NOT_NULL(ent); - size = cmp_ent_create(ent, DATA_TYPE_IMAGETTE, 1, 10); + size = cmp_ent_create(ent, DATA_TYPE_IMAGETTE, 0, 10); TEST_ASSERT_EQUAL_UINT32(sizeof(struct cmp_entity), size); + error = cmp_ent_set_cmp_mode(ent, CMP_MODE_DIFF_ZERO); + TEST_ASSERT_FALSE(error); /* ent = NULL */ error = cmp_ent_read_header(NULL, &cfg); @@ -1838,39 +1842,192 @@ void test_cmp_ent_read_header_error_cases(void) TEST_ASSERT_FALSE(error); /* unknown data type */ - cmp_ent_set_data_type(ent, DATA_TYPE_UNKNOWN, 1); + cmp_ent_set_data_type(ent, DATA_TYPE_UNKNOWN, 0); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_TRUE(error); - cmp_ent_set_data_type(ent, (enum cmp_data_type)1000, 1); + cmp_ent_set_data_type(ent, (enum cmp_data_type)1000, 0); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_TRUE(error); /* unknown data type */ - cmp_ent_set_data_type(ent, DATA_TYPE_F_CAM_BACKGROUND+1, 1); + cmp_ent_set_data_type(ent, DATA_TYPE_F_CAM_BACKGROUND+1, 0); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_TRUE(error); /* this should work */ - cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 1); + cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 0); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_FALSE(error); - /* cmp_mode CMP_MODE_RAW and no raw data bit */ - cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 0); + /* original_size and data product type not compatible */ + cmp_ent_set_original_size(ent, 11); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_TRUE(error); + /* this should work */ - cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 1); + cmp_ent_set_original_size(ent, 12); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_FALSE(error); - /* original_size and data product type not compatible */ - cmp_ent_set_original_size(ent, 11); + + /* create a raw entity */ + size = cmp_ent_create(ent, DATA_TYPE_IMAGETTE, 1, 10); + TEST_ASSERT_NOT_EQUAL_INT(0, size); + + /* mean cmp_par to high */ + cfg.cmp_par_background_mean = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_background_mean = 0xFFFF; + + /* mean spill to high */ + cfg.spill_background_mean = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_background_mean = 0xFFFFFF; + + /* variance cmp_par to high */ + cfg.cmp_par_background_variance = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_background_variance = 0xFFFF; + + /* variance spill to high */ + 
cfg.spill_background_variance = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_background_variance = 0xFFFFFF; + + /* pixels_error cmp_par to high */ + cfg.cmp_par_background_pixels_error = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_background_pixels_error = 0xFFFF; + + /* pixels_error spill to high */ + cfg.spill_background_pixels_error = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_background_pixels_error = 0xFFFFFF; + + + cmp_ent_set_data_type(ent, DATA_TYPE_F_FX_EFX_NCOB_ECOB, 0); + cfg.data_type = DATA_TYPE_F_FX_EFX_NCOB_ECOB; + + /* exp_flags cmp_par to high */ + cfg.cmp_par_exp_flags = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_exp_flags = 0xFFFF; + + /* exp_flags spill to high */ + cfg.spill_exp_flags = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_exp_flags = 0xFFFFFF; + + /* fx cmp_par to high */ + cfg.cmp_par_fx = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_fx = 0xFFFF; + + /* fx spill to high */ + cfg.spill_fx = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_fx = 0xFFFFFF; + + /* ncob cmp_par to high */ + cfg.cmp_par_ncob = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_ncob = 0xFFFF; + + /* ncob spill to high */ + cfg.spill_ncob = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_ncob = 0xFFFFFF; + + /* efx cmp_par to high */ + cfg.cmp_par_efx = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_efx = 0xFFFF; + + /* efx spill to high */ + cfg.spill_efx = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_efx = 0xFFFFFF; + + /* ecob cmp_par to high */ + cfg.cmp_par_ecob = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_ecob = 0xFFFF; + + /* ecob spill to high */ + cfg.spill_ecob = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_ecob = 0xFFFFFF; + + /* fx_cob_variance cmp_par to high */ + cfg.cmp_par_fx_cob_variance = 0x10000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.cmp_par_fx_cob_variance = 0xFFFF; + + /* fx_cob_variance spill to high */ + cfg.spill_fx_cob_variance = 0x1000000; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + cfg.spill_fx_cob_variance = 0xFFFFFF; + + /* test data type = DATA_TYPE_UNKNOWN */ + cmp_ent_set_data_type(ent, DATA_TYPE_UNKNOWN, 0); + cfg.data_type = DATA_TYPE_UNKNOWN; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + + /* test data type = DATA_TYPE_F_CAM_BACKGROUND +1 */ + cmp_ent_set_data_type(ent, DATA_TYPE_F_CAM_BACKGROUND + 1, 0); + cfg.data_type = DATA_TYPE_F_CAM_BACKGROUND + 1; + error = cmp_ent_write_cmp_pars(ent, &cfg, cmp_size_bits); + TEST_ASSERT_TRUE(error); + free(ent); + ent = NULL; + cmp_max_used_bits_list_empty(); + + + /* create a imagette entity */ + size = cmp_ent_create(NULL, DATA_TYPE_IMAGETTE, 1, 
10); + ent = malloc(size); TEST_ASSERT_NOT_NULL(ent); + size = cmp_ent_create(ent, DATA_TYPE_IMAGETTE, 1, 10); + TEST_ASSERT_NOT_EQUAL_INT(0, size); + cmp_ent_set_cmp_mode(ent, CMP_MODE_RAW); + cmp_ent_set_original_size(ent, 10); + + /* this should work */ + error = cmp_ent_read_header(ent, &cfg); + TEST_ASSERT_FALSE(error); + + /* cmp_mode CMP_MODE_RAW and no raw data bit */ + cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 0); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_TRUE(error); + /* this should work */ - cmp_ent_set_original_size(ent, 12); + cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 1); error = cmp_ent_read_header(ent, &cfg); TEST_ASSERT_FALSE(error); + /* cmp_mode CMP_MODE_RAW cmp_data_size != original_size */ + cmp_ent_set_data_type(ent, DATA_TYPE_IMAGETTE, 0); + cmp_ent_set_original_size(ent, 8); + error = cmp_ent_read_header(ent, &cfg); + TEST_ASSERT_TRUE(error); + free(ent); }
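
Note on the pattern the new compress_chunk() tests above rely on: each test first calls compress_chunk() with a NULL destination buffer to query the size needed for the compressed entity, then allocates a buffer of that size and compresses in a second call (the non-RAW test additionally rounds the queried size up to a multiple of 4 before allocating, see ROUND_UP_TO_MULTIPLE_OF_4 above). A minimal sketch of this two-pass usage follows; the helper name and its error handling are illustrative only and not part of the patch.

#include <stdint.h>
#include <stdlib.h>

#include <cmp_chunk.h>

/* illustrative helper: returns 0 on success, a negative value on error;
 * on success *dst_out holds the malloc()ed compressed entity and
 * *cmp_size_out its size in bytes (the caller frees *dst_out)
 */
static int compress_chunk_two_pass(void *chunk, uint32_t chunk_size,
				   struct cmp_par *par, uint32_t **dst_out,
				   int *cmp_size_out)
{
	uint32_t *dst;
	int cmp_size;

	/* pass 1: with dst == NULL compress_chunk() only reports the size
	 * needed for the compressed data (no model-based compression here,
	 * so the model and updated-model pointers are NULL)
	 */
	cmp_size = compress_chunk(chunk, chunk_size, NULL, NULL, NULL, 0, par);
	if (cmp_size < 0)
		return cmp_size; /* propagate the library error code */

	dst = malloc((size_t)cmp_size);
	if (!dst)
		return -1;

	/* pass 2: compress into the allocated buffer */
	cmp_size = compress_chunk(chunk, chunk_size, NULL, NULL, dst,
				  (size_t)cmp_size, par);
	if (cmp_size < 0) {
		free(dst);
		return cmp_size;
	}

	*dst_out = dst;
	*cmp_size_out = cmp_size;
	return 0;
}

The decompression test above uses the analogous two-pass pattern: decompress_cmp_entiy() is first called with a NULL output buffer to obtain the decompressed size before the destination buffer is allocated.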