diff --git a/.docker/docker-compose.yml b/.docker/docker-compose.yml
index d51b00551a3a6af30bbb7d454f6fea00c17e7d65..a7496bc4f47838b9944b1c844a3b7d98e32cf1f3 100644
--- a/.docker/docker-compose.yml
+++ b/.docker/docker-compose.yml
@@ -14,10 +14,11 @@ services:
     restart: "no"
     container_name: dbrepo-metadata-db
     hostname: metadata-db
-    image: docker.io/dbrepo/metadata-db:1.4.4
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - metadata-db-data:/bitnami/mariadb
-      - ./dist/2_setup-data.sql:/docker-entrypoint-initdb.d/2_setup-data.sql
+      - ./dbrepo-metadata-db/setup-schema.sql:/docker-entrypoint-initdb.d/1_setup-schema.sql
+      - ./dbrepo-metadata-db/setup-data.sql:/docker-entrypoint-initdb.d/2_setup-data.sql
     ports:
       - "3306:3306"
     environment:
@@ -35,7 +36,7 @@ services:
     restart: "no"
     container_name: dbrepo-data-db
     hostname: data-db
-    image: docker.io/bitnami/mariadb-galera:11.2.2-debian-11-r0
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - data-db-data:/bitnami/mariadb
       - "${SHARED_VOLUME:-/tmp}:/tmp"
@@ -43,7 +44,6 @@ services:
       - "3307:3306"
     environment:
       MARIADB_ROOT_PASSWORD: "${USER_DB_PASSWORD:-dbrepo}"
-      MARIADB_GALERA_MARIABACKUP_PASSWORD: "${USER_DB_BACKUP_PASSWORD:-dbrepo}"
     healthcheck:
       test: mysqladmin ping --user="${USER_DB_USERNAME:-root}" --password="${USER_DB_PASSWORD:-dbrepo}" --silent
       interval: 10s
@@ -56,7 +56,7 @@ services:
     restart: "no"
     container_name: dbrepo-auth-db
     hostname: auth-db
-    image: docker.io/bitnami/mariadb:11.2.2-debian-11-r0
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - auth-db-data:/bitnami/mariadb
     ports:
@@ -76,7 +76,7 @@ services:
     restart: "no"
     container_name: dbrepo-auth-service
     hostname: auth-service
-    image: docker.io/dbrepo/auth-service:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/auth-service:1.4.4
     healthcheck:
       test: curl -sSL 'http://0.0.0.0:8080/realms/dbrepo' | grep "dbrepo" || exit 1
       interval: 10s
@@ -98,7 +98,7 @@ services:
     restart: "no"
     container_name: dbrepo-metadata-service
     hostname: metadata-service
-    image: docker.io/dbrepo/metadata-service:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/metadata-service:1.4.4
     volumes:
       - "${SHARED_VOLUME:-/tmp}:/tmp"
     environment:
@@ -124,7 +124,7 @@ services:
       DELETED_RECORD: "${DELETED_RECORD:-persistent}"
       GRANULARITY: "${GRANULARITY:-YYYY-MM-DDThh:mm:ssZ}"
       JWT_PUBKEY: "${JWT_PUBKEY:-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqqnHQ2BWWW9vDNLRCcxD++xZg/16oqMo/c1l+lcFEjjAIJjJp/HqrPYU/U9GvquGE6PbVFtTzW1KcKawOW+FJNOA3CGo8Q1TFEfz43B8rZpKsFbJKvQGVv1Z4HaKPvLUm7iMm8Hv91cLduuoWx6Q3DPe2vg13GKKEZe7UFghF+0T9u8EKzA/XqQ0OiICmsmYPbwvf9N3bCKsB/Y10EYmZRb8IhCoV9mmO5TxgWgiuNeCTtNCv2ePYqL/U0WvyGFW0reasIK8eg3KrAUj8DpyOgPOVBn3lBGf+3KFSYi+0bwZbJZWqbC/Xlk20Go1YfeJPRIt7ImxD27R/lNjgDO/MwIDAQAB}"
-      LOG_LEVEL: "${LOG_LEVEL:-info}"
+      LOG_LEVEL: ${LOG_LEVEL:-info}
       METADATA_DB: "${METADATA_DB:-dbrepo}"
       METADATA_HOST: "${METADATA_HOST:-metadata-db}"
       METADATA_JDBC_EXTRA_ARGS: "${METADATA_JDBC_EXTRA_ARGS:-}"
@@ -134,13 +134,13 @@ services:
       REPOSITORY_NAME: "${REPOSITORY_NAME:-Database Repository}"
       SEARCH_SERVICE_ENDPOINT: "${SEARCH_SERVICE_ENDPOINT:-http://gateway-service}"
       S3_ACCESS_KEY_ID: "${S3_ACCESS_KEY_ID:-seaweedfsadmin}"
-      S3_ENDPOINT: "${S3_ENDPOINT:-http://gateway-service/api/storage}"
+      S3_ENDPOINT: "${S3_ENDPOINT:-http://storage-service:9000}"
       S3_EXPORT_BUCKET: "${S3_EXPORT_BUCKET:-dbrepo-download}"
       S3_IMPORT_BUCKET: "${S3_IMPORT_BUCKET:-dbrepo-upload}"
       S3_SECRET_ACCESS_KEY: "${S3_SECRET_ACCESS_KEY:-seaweedfsadmin}"
       SPARQL_CONNECTION_TIMEOUT: "${SPARQL_CONNECTION_TIMEOUT:-10000}"
     healthcheck:
-      test: wget -qO- localhost:8080/actuator/health/readiness | grep -q "UP" || exit 1
+      test: curl -sSL localhost:8080/actuator/health/liveness | grep 'UP' || exit 1
       interval: 10s
       timeout: 5s
       retries: 12
@@ -160,7 +160,7 @@ services:
     restart: "no"
     container_name: dbrepo-analyse-service
     hostname: analyse-service
-    image: docker.io/dbrepo/analyse-service:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/analyse-service:1.4.4
     environment:
       ADMIN_PASSWORD: "${ADMIN_PASSWORD:-admin}"
       ADMIN_USERNAME: "${ADMIN_USERNAME:-admin}"
@@ -211,7 +211,7 @@ services:
     restart: "no"
     container_name: dbrepo-search-db
     hostname: search-db
-    image: docker.io/dbrepo/search-db:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/search-db:1.4.4
     healthcheck:
       test: curl -sSL localhost:9200/_plugins/_security/health | jq .status | grep UP
       interval: 10s
@@ -235,7 +235,7 @@ services:
     restart: "no"
     container_name: dbrepo-search-service
     hostname: search-service
-    image: docker.io/dbrepo/search-service:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/search-service:1.4.4
     environment:
       ADMIN_PASSWORD: "${ADMIN_PASSWORD:-admin}"
       ADMIN_USERNAME: "${ADMIN_USERNAME:-admin}"
@@ -243,6 +243,7 @@ services:
       AUTH_SERVICE_CLIENT_SECRET: ${AUTH_SERVICE_CLIENT:-MUwRc7yfXSJwX8AdRMWaQC3Nep1VjwgG}
       AUTH_SERVICE_ENDPOINT: ${AUTH_SERVICE_ENDPOINT:-http://auth-service:8080}
       COLLECTION: ${COLLECTION:-['database','table','column','identifier','unit','concept','user','view']}
+      GATEWAY_SERVICE_ENDPOINT: ${GATEWAY_SERVICE_ENDPOINT:-http://gateway-service}
       OPENSEARCH_HOST: ${OPENSEARCH_HOST:-search-db}
       OPENSEARCH_PORT: ${OPENSEARCH_PORT:-9200}
       OPENSEARCH_USERNAME: ${OPENSEARCH_USERNAME:-admin}
@@ -253,7 +254,7 @@ services:
     restart: "no"
     container_name: dbrepo-data-db-sidecar
     hostname: data-db-sidecar
-    image: docker.io/dbrepo/data-db-sidecar:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/data-db-sidecar:1.4.4
     environment:
       S3_ACCESS_KEY_ID: "${S3_ACCESS_KEY_ID:-seaweedfsadmin}"
       S3_ENDPOINT: "${S3_ENDPOINT:-http://storage-service:9000}"
@@ -275,7 +276,7 @@ services:
     restart: "no"
     container_name: dbrepo-ui
     hostname: ui
-    image: docker.io/dbrepo/ui:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/ui:1.4.4
     depends_on:
       dbrepo-search-service:
         condition: service_started
@@ -293,7 +294,7 @@ services:
     restart: "no"
     container_name: dbrepo-gateway-service
     hostname: gateway-service
-    image: docker.io/nginx:1.25-alpine-slim
+    image: docker.io/nginx:1.27.0-alpine3.19-slim
     ports:
       - "80:80"
       - "443:443"
@@ -319,7 +320,7 @@ services:
     restart: "no"
     container_name: dbrepo-search-service-init
     hostname: search-service-init
-    image: docker.io/dbrepo/search-service-init:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/search-service-init:1.4.4
     environment:
       GATEWAY_SERVICE_ENDPOINT: ${GATEWAY_SERVICE_ENDPOINT:-http://gateway-service}
       OPENSEARCH_HOST: ${OPENSEARCH_HOST:-search-db}
@@ -354,7 +355,7 @@ services:
     restart: "no"
     container_name: dbrepo-storage-service-init
     hostname: storage-service-init
-    image: docker.io/dbrepo/storage-service-init:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/storage-service-init:1.4.4
     environment:
       SEAWEEDFS_ENDPOINT: "${STORAGE_SEAWEEDFS_ENDPOINT:-storage-service:9333}"
     depends_on:
@@ -391,7 +392,7 @@ services:
     restart: "no"
     container_name: dbrepo-data-service
     hostname: data-service
-    image: docker.io/dbrepo/data-service:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/data-service:1.4.4
     volumes:
       - "${SHARED_VOLUME:-/tmp}:/tmp"
     environment:
@@ -429,7 +430,7 @@ services:
       S3_IMPORT_BUCKET: "${S3_IMPORT_BUCKET:-dbrepo-upload}"
       S3_SECRET_ACCESS_KEY: "${S3_SECRET_ACCESS_KEY:-seaweedfsadmin}"
     healthcheck:
-      test: wget -qO- localhost:8080/actuator/health/readiness | grep -q "UP" || exit 1
+      test: curl -sSL localhost:8080/actuator/health/liveness | grep 'UP' || exit 1
       interval: 10s
       timeout: 5s
       retries: 12
diff --git a/.docs/api/analyse-service.md b/.docs/api/analyse-service.md
index 484271bbfe75062897c6a7a2a4497e084337f3e1..fe45e9492c4a7c53c024603690132e2dfa5aeec9 100644
--- a/.docs/api/analyse-service.md
+++ b/.docs/api/analyse-service.md
@@ -6,7 +6,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`dbrepo/analyse-service:__APPVERSION__`](https://hub.docker.com/r/dbrepo/analyse-service)
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/analyse-service:1.4.4`](https://hub.docker.com/r/dbrepo/analyse-service)
 
     * Ports: 5000/tcp
     * Prometheus: `http://<hostname>:5000/metrics`
@@ -15,37 +15,37 @@ author: Martin Weise
 
 ## Overview
 
-It suggests data types for the [User Interface](./system-other-ui) when creating a table from a 
-*comma separated values* (CSV) -file. It recommends enumerations for columns and returns e.g. a list of potential 
+The Analyse Service suggests data types for the [User Interface](../ui) when creating a table from a
+*comma-separated values* (CSV) file. It recommends enumerations for columns and returns e.g. a list of potential
 primary key candidates. The researcher is able to confirm these suggestions manually. Moreover, the Analyse Service
 determines basic statistical properties of numerical columns.
 
 ### Analysis
 
-After [uploading](./system-services-storage/#buckets) the CSV-file into the `dbrepo-upload` bucket of 
-the [Storage Service](./system-services-storage), analysis for data types and primary keys follows the flow:
- 
-1. Retrieve the CSV-file from the `dbrepo-upload` bucket of the Storage Service as data stream (=nothing is stored in 
+After [uploading](../storage-service/#buckets) the CSV-file into the `dbrepo-upload` bucket of
+the [Storage Service](../storage-service), analysis for data types and primary keys follows the flow:
+
+1. Retrieve the CSV-file from the `dbrepo-upload` bucket of the Storage Service as a data stream (=nothing is stored in
    the service) with the [`boto3`](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) client.
-2. When no separator is known, the Analyse Service tries to guess the separator from the first line 
+2. When no separator is known, the Analyse Service tries to guess the separator from the first line
    with [`csv.Sniff().sniff(...)`](https://docs.python.org/3/library/csv.html#csv.Sniffer). This step is optional when
    the separator was provided via HTTP-payload: `{"separator": ";", ...}`
-3. With the separator known (either from step 2 or via HTTP-payload), 
-   the [`messytables.CSVTableSet(...)`](https://messytables.readthedocs.io/en/latest/#csv-support) guesses the headers
-   and column types and enums, if the HTTP-payload contains `{"enum": true, ...}`.
+3. With the separator known (either from step 2 or via HTTP-payload), [`pandas`](https://pypi.org/project/pandas/)
+   guesses the headers, column types and enums, if the HTTP-payload contains `{"enum": true, ...}`. The data type
+   is guessed by a combination of pandas and heuristics.
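+
+The following minimal sketch illustrates this flow outside of the service. The endpoint, bucket and credentials are
+the compose defaults from this documentation; the object key is hypothetical.
+
+```python
+import csv
+import io
+
+import boto3
+import pandas as pd
+
+# Compose defaults for the Storage Service; the object key is hypothetical.
+s3 = boto3.client("s3", endpoint_url="http://storage-service:9000",
+                  aws_access_key_id="seaweedfsadmin",
+                  aws_secret_access_key="seaweedfsadmin")
+
+# 1. Retrieve the CSV-file as a data stream, nothing is written to disk.
+body = s3.get_object(Bucket="dbrepo-upload", Key="sample.csv")["Body"]
+head = body.read(4096).decode()
+
+# 2. Guess the separator from the first line.
+separator = csv.Sniffer().sniff(head.splitlines()[0]).delimiter
+
+# 3. Let pandas guess the headers and column types.
+df = pd.read_csv(io.StringIO(head + body.read().decode()), sep=separator)
+print(df.dtypes)
+```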
 
 ### Examples
 
-See the [usage page](./usage-analyse/) for examples.
+See the [usage page](..) for examples.
 
 ## Limitations
 
 !!! question "Do you miss functionality? Do these limitations affect you?"
 
     We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
+    in [contact](../../contact) with us; we happily answer requests for collaboration with attached CV and your programming
     experience!
 
 ## Security
 
-1. Credentials for the [Storage Service](./system-services-storage) are stored in plaintext environment variables.
+1. Credentials for the [Storage Service](../storage-service) are stored in plaintext environment variables.
diff --git a/.docs/api/auth-service.md b/.docs/api/auth-service.md
index 5d3e0f42b2bb19b28451c8a8c8e40d937ffe9fab..35c715fc1b6b6e16734a6059a9eaf388e53bb3ad 100644
--- a/.docs/api/auth-service.md
+++ b/.docs/api/auth-service.md
@@ -6,17 +6,23 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`dbrepo/authentication-service:__APPVERSION__`](https://hub.docker.com/r/dbrepo/authentication-service)
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/authentication-service:1.4.4`](https://hub.docker.com/r/dbrepo/authentication-service)
 
     * Ports: 8080/tcp
-    * UI: `http://<hostname>/api/auth/admin/`
+    * UI: `http://<hostname>/api/auth/`
 
 ## Overview
 
-By default, users are created using the [User Interface](../system-other-ui) and the sign-up page in the User Interface.
-This creates a new user in the [Authentication Database](../system-databases-authentication), the user identity is then
-managed by the
-Authentication Service.
+By default, users are created using the [User Interface](../ui) and the sign-up page in the User Interface.
+This creates a new user in Keycloak. The user identity is then managed by the Auth Service. Only a very small subset
+of immutable properties (id, username) is mirrored in the [Metadata Database](../metadata-db) for faster access.
+
+## Identities
+
+:octicons-tag-16:{ title="Minimum version" } 1.4.4
+
+Identities can also be added in Keycloak directly. When requesting a JWT token from the `/api/user` endpoint, the
+immutable properties mentioned in the [Overview](#overview) are copied transparently for the user on first login.
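+
+As a sketch, a JWT access token can be obtained from Keycloak's standard OpenID Connect token endpoint. The realm name
+matches the default deployment; the client identifier and user credentials below are placeholder assumptions.
+
+```python
+import requests
+
+# Standard Keycloak token endpoint; realm "dbrepo" is the deployment default,
+# client_id and the user credentials are placeholders.
+response = requests.post(
+    "http://auth-service:8080/realms/dbrepo/protocol/openid-connect/token",
+    data={"grant_type": "password", "client_id": "dbrepo-client",
+          "username": "alice", "password": "s3cr3t"})
+access_token = response.json()["access_token"]
+```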
 
 ## Groups
 
@@ -41,163 +47,16 @@ Each of the composite role has a set of other associated composite roles.
 </figure>
 
 There is one role for one specific action in the services. For example: the `create-database` role authorizes a user to
-create a database in a Docker container. Therefore,
-the [`DatabaseEndpoint.java`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/a5bdd1e2169bae6497e2f7eee82dad8b9b059850/fda-database-service/rest-service/src/main/java/at/tuwien/endpoints/DatabaseEndpoint.java#L78)
-endpoint requires a JWT access token with this authority.
-
-```java
-@PostMapping
-@PreAuthorize("hasAuthority('create-database')")
-public ResponseEntity<DatabaseBriefDto> create(@NotNull Long containerId,
-                                               @Valid @RequestBody DatabaseCreateDto createDto,
-                                               @NotNull Principal principal) {
-...
-}
-```
-
-### Default Container Handling
-
-| Name              | Description                   |
-|-------------------|-------------------------------|
-| `find-container`  | Can find a specific container |
-| `list-containers` | Can list all containers       |
-
-### Default Database Handling
-
-| Name                         | Description                                          |
-|------------------------------|------------------------------------------------------|
-| `check-database-access`      | Can check the access to a database of a user         |
-| `create-database`            | Can create a database                                |
-| `create-database-access`     | Can give a new access to a database of a user        |
-| `delete-database-access`     | Can delete the access to a database of a user        |
-| `find-database`              | Can find a specific database in a container          |
-| `list-databases`             | Can list all databases in a container                |
-| `modify-database-image`      | Can update the database image                        |
-| `modify-database-owner`      | Can modify the database owner                        |
-| `modify-database-visibility` | Can modify the database visibility (public, private) |
-| `update-database-access`     | Can update the access to a database of a user        |
-
-### Default Table Handling
-
-| Name                            | Description                                          |
-|---------------------------------|------------------------------------------------------|
-| `create-table`                  | Can create a table                                   |
-| `find-tables`                   | Can list a specific table in a database              |
-| `list-tables`                   | Can list all tables                                  |
-| `modify-table-column-semantics` | Can modify the column semantics of a specific column |
-| `delete-table`                  | Can delete tables owned by the user in a database    |
-
-### Default Query Handling
-
-| Name                      | Description                                   |
-|---------------------------|-----------------------------------------------|
-| `create-database-view`    | Can create a view in a database               |
-| `delete-database-view`    | Can delete a view in a database               |
-| `delete-table-data`       | Can delete data in a table                    |
-| `execute-query`           | Can execute a query statement                 |
-| `export-query-data`       | Can export the data that a query has produced |
-| `export-table-data`       | Can export the data stored in a table         |
-| `find-database-view`      | Can find a specific database view             |
-| `find-query`              | Can find a specific query in the query store  |
-| `insert-table-data`       | Can insert data into a table                  |
-| `list-database-views`     | Can list all database views                   |
-| `list-queries`            | Can list all queries in the query store       |
-| `persist-query`           | Can persist a query in the query store        |
-| `re-execute-query`        | Can re-execute a query to reproduce a result  |
-| `view-database-view-data` | Can view the data produced by a database view |
-| `view-table-data`         | Can view the data in a table                  |
-| `view-table-history`      | Can view the data history of a table          |
-
-### Default Identifier Handling
-
-| Name                | Description                                 |
-|---------------------|---------------------------------------------|
-| `create-identifier` | Can create an identifier (subset, database) |
-| `find-identifier`   | Can find a specific identifier              |
-| `list-identifier`   | Can list all identifiers                    |
-
-### Default User Handling
-
-| Name                      | Description                             |
-|---------------------------|-----------------------------------------|
-| `modify-user-theme`       | Can modify the user theme (light, dark) |
-| `modify-user-information` | Can modify the user information         |
-
-### Default Maintenance Handling
-
-| Name                         | Description                              |
-|------------------------------|------------------------------------------|
-| `create-maintenance-message` | Can create a maintenance message banner  |
-| `delete-maintenance-message` | Can delete a maintenance message banner  |
-| `find-maintenance-message`   | Can find a maintenance message banner    |
-| `list-maintenance-messages`  | Can list all maintenance message banners |
-| `update-maintenance-message` | Can update a maintenance message banner  |
-
-### Default Semantics Handling
-
-| Name                      | Description                                                     |
-|---------------------------|-----------------------------------------------------------------|
-| `create-semantic-unit`    | Can save a previously unknown unit for a table column           |
-| `create-semantic-concept` | Can save a previously unknown concept for a table column        |
-| `execute-semantic-query`  | Can query remote SPARQL endpoints to get labels and description |
-| `table-semantic-analyse`  | Can automatically suggest units and concepts for a table        |
-
-### Escalated User Handling
-
-| Name        | Description                                   |
-|-------------|-----------------------------------------------|
-| `find-user` | Can list user information for a specific user |
-
-### Escalated Container Handling
-
-| Name               | Description              |
-|--------------------|--------------------------|
-| `create-container` | Can create a container   |
-| `delete-container` | Can delete any container |
-
-### Escalated Database Handling
-
-| Name              | Description                              |
-|-------------------|------------------------------------------|
-| `delete-database` | Can delete any database in any container |
-
-### Escalated Table Handling
-
-| Name                   | Description                          |
-|------------------------|--------------------------------------|
-| `delete-foreign-table` | Can delete any table in any database |
-
-### Escalated Query Handling
-
-| Name | Description |
-|------|-------------|
-| /    |             |
-
-### Escalated Identifier Handling
-
-| Name                         | Description                                       |
-|------------------------------|---------------------------------------------------|
-| `create-foreign-identifier`  | Can create an identifier to any database or query |
-| `delete-identifier`          | Can delete any identifier                         |
-| `modify-identifier-metadata` | Can modify any identifier metadata                |
-
-### Escalated Semantics Handling
-
-| Name                                    | Description                                  |
-|-----------------------------------------|----------------------------------------------|
-| `create-ontology`                       | Can register a new ontology                  |
-| `delete-ontology`                       | Can unregister an ontology                   |
-| `list-ontologies`                       | Can list all ontologies                      |
-| `modify-foreign-table-column-semantics` | Can modify any table column concept and unit |
-| `update-ontology`                       | Can update ontology metadata                 |
-| `update-semantic-concept`               | Can update own table column concept          |
-| `update-semantic-unit`                  | Can update own table column unit             |
+create a database.
+
+A full list of available roles can be obtained
+from [`dbrepo-realm.json`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/fb8d14ba02ee32b9a69a30905437b5c9e28adc21/dbrepo-auth-service/dbrepo-realm.json#L46)
+which is imported into Keycloak on startup.
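+
+To check which of these roles a user effectively holds, the realm roles can be read from a JWT access token, e.g. with
+[PyJWT](https://pyjwt.readthedocs.io/) (a sketch; signature verification is skipped for brevity and must never be
+skipped for authorization decisions):
+
+```python
+import jwt  # PyJWT
+
+access_token = "<JWT obtained from the Auth Service>"
+
+# Decode without signature verification -- for inspection only.
+claims = jwt.decode(access_token, options={"verify_signature": False})
+print(claims["realm_access"]["roles"])  # e.g. ['create-database', ...]
+```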
 
 ## Limitations
 
 * No support for sending e-mails through Keycloak by default.
 * No support for temporary passwords.
-* No support for adding identifies in Keycloak directly.
 * No support for multi-factor authentication.
 
 !!! question "Do you miss functionality? Do these limitations affect you?"
@@ -208,5 +67,5 @@ public ResponseEntity<DatabaseBriefDto> create(@NotNull Long containerId,
 
 ## Security
 
-1. Mount your TLS certificate / private key pair into `/app/tls.crt` and `/app/tls.key` and
-   set `KC_HTTPS_CERTIFICATE_FILE=/app/tls.crt` and set `KC_HTTPS_CERTIFICATE_KEY_FILE=/app/tls.key`.
+1. Keycloak should be configured to use TLS certificates, follow
+   the [official documentation](https://www.keycloak.org/server/enabletls).
diff --git a/.docs/api/data-db.md b/.docs/api/data-db.md
index c91d230be7776ecaa904513396895e4d6725781c..3b2738f981eefd2749b95a40a347951ea9a0a39c 100644
--- a/.docs/api/data-db.md
+++ b/.docs/api/data-db.md
@@ -4,7 +4,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`bitnami/mariadb-galera:11.2.2-debian-11-r0`](https://hub.docker.com/r/bitnami/mariadb-galera)
+    Image: [`docker.io/bitnami/mariadb:11.1.3-debian-11-r6`](https://hub.docker.com/r/bitnami/mariadb)
 
     * Ports: 3306/tcp
     * JDBC: `jdbc://mariadb:<hostname>:3306`
@@ -17,20 +17,26 @@ author: Martin Weise
 
 ## Overview
 
-By default, only one Data Database is deployed. You can deploy multiple (different) Data Database instances and make
-them available in the repository as follows:
+The Data Database contains the research data. In the default configuration, only one database of this type is deployed.
+Any number of MariaDB data databases can be integrated into DBRepo, even non-empty ones. The database needs to be
+registered in the Metadata Database to be visible in the [User Interface](../ui) and usable from e.g. the Python 
+Library.
 
-=== "Terminal"
+## Architecture
 
-    ```shell
-    curl \
-       -sSL \
-       http://<hostname>/api/container \
-       -X POST \
-       -d '{"name": "Data Database 2", "imageId": 1, "host": "example.com", "port": 3306, "privilegedUsername": "root", "privilegedPassword": "s3cr3t" }'
-    ```
+### Sidecar
+
+We deploy a sidecar that handles the CSV-file upload/download operations between
+the [Storage Service](../storage-service) and the Data Database using a Python Flask application and
+the [`boto3`](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) client until MariaDB supports S3
+natively.
+
+<figure markdown>
+![Sidecar architecture detailed](../images/architecture-data-db.svg)
+<figcaption>Sidecar that handles the CSV-file upload/download.</figcaption>
+</figure>
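+
+A minimal sketch of what the sidecar does under the hood; the bucket names and credentials are the compose defaults,
+the object keys and local paths are hypothetical:
+
+```python
+import boto3
+
+# Compose defaults; object keys and local paths are hypothetical.
+s3 = boto3.client("s3", endpoint_url="http://storage-service:9000",
+                  aws_access_key_id="seaweedfsadmin",
+                  aws_secret_access_key="seaweedfsadmin")
+
+# Export: push a CSV-file produced by the Data Database to the Blob Storage.
+s3.upload_file("/tmp/export.csv", "dbrepo-download", "export.csv")
+
+# Import: pull an uploaded CSV-file into the shared volume of the Data Database.
+s3.download_file("dbrepo-upload", "import.csv", "/tmp/import.csv")
+```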
 
-### Settings
+## Data
 
 The procedures require the user-generated databases to have the same collation (because of comparison operations).
 Ensure that the Data Database has the character set `utf8mb4` and collation `utf8mb4_general_ci` in your `my.cfg`:
@@ -51,18 +57,6 @@ mariadb-galera:
   extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci"
 ```
 
-### Sidecar
-
-We deploy a sidecar that handles the CSV-file upload/download operations between
-the [Storage Service](../system-services-storage) and the Data Database using a Python Flask application and
-the [`boto3`](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) client until MariaDB supports S3
-natively.
-
-<figure markdown>
-![Sidecar architecture detailed](../images/architecture-data-db.svg)
-<figcaption>Sidecar that handles the CSV-file upload/download.</figcaption>
-</figure>
-
 ### Backup
 
 Export all databases with `--skip-lock-tables` option for MariaDB Galera clusters as it is not supported currently by
diff --git a/.docs/api/data-service.md b/.docs/api/data-service.md
index 41efb2151420a4507d6d0e15e6df5e8be7486984..ab64c50d31dd29597f7c4bff956bee3cfeb66814 100644
--- a/.docs/api/data-service.md
+++ b/.docs/api/data-service.md
@@ -6,7 +6,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`dbrepo/data-service:__APPVERSION__`](https://hub.docker.com/r/dbrepo/data-service)
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/data-service:1.4.4`](https://hub.docker.com/r/dbrepo/data-service)
 
     * Ports: 9093/tcp
     * Info: `http://<hostname>:9093/actuator/info`
@@ -27,7 +27,7 @@ Data Service up.
 !!! question "Do you miss functionality? Do these limitations affect you?"
 
     We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
+    in [contact](../../contact) with us; we happily answer requests for collaboration with attached CV and your programming
     experience!
 
 ## Security
diff --git a/.docs/api/gateway-service.md b/.docs/api/gateway-service.md
index cd3be4f73dd8f4891513615f7b901c055f71fed5..923b95a9f30ac9af06bb029682bd67bc7f2f0961 100644
--- a/.docs/api/gateway-service.md
+++ b/.docs/api/gateway-service.md
@@ -6,21 +6,21 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`nginx:1.25-alpine-slim`](https://hub.docker.com/r/nginx)
+    Image: [`docker.io/nginx:1.27.0-alpine3.19-slim`](https://hub.docker.com/r/nginx)
 
     * Ports: 80/tcp
 
 ## Overview
 
 Provides a single point of access to the *application programming interface* (API) and configures a
-standard [NGINX](https://www.nginx.com/) reverse proxy for load balancing. This component is optional if you already have a load balancer
-or reverse proxy running.
+standard [NGINX](https://www.nginx.com/) reverse proxy for load balancing. This component is optional if you already
+have a load balancer or reverse proxy running.
 
 ## Settings
 
 ### SSL/TLS Security
 
-To setup SSL/TLS encryption, mount your TLS certificate and TLS private key into the container directly into the 
+To set up SSL/TLS encryption, mount your TLS certificate and TLS private key directly into the container's
 `/etc/nginx/` directory.
 
 ```yaml title="docker-compose.yml"
@@ -41,14 +41,14 @@ If your TLS private key as a password, you need to specify it in the `dbrepo.con
 
 ### User Interface
 
-To serve the [User Interface](./system-other-ui/) under different port than `80`, change the port mapping in 
+To serve the [User Interface](../ui/) under a different port than `80`, change the port mapping in
 the `docker-compose.yml` to e.g. port `8000`:
 
 ```yaml title="docker-compose.yml"
 services:
   ...
   dbrepo-gateway-service:
-    image: docker.io/nginx:1.25-alpine-slim
+    image: docker.io/nginx:1.27.0-alpine3.19-slim
     ports:
       - "8000:80"
   ...
@@ -61,13 +61,12 @@ services:
 !!! question "Do you miss functionality? Do these limitations affect you?"
 
     We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
+    in [contact](../../contact) with us; we happily answer requests for collaboration with attached CV and your programming
     experience!
 
-
 ## Security
 
-1. Enable TLS encryption by downloading 
+1. Enable TLS encryption by downloading
    the [`dbrepo.conf`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/master/dbrepo-gateway-service/dbrepo.conf)
    and editing the *server* block to include your TLS certificate (with trust chain) `fullchain.pem` and TLS private key
    `privkey.pem` (PEM-encoded).
diff --git a/.docs/api/metadata-db.md b/.docs/api/metadata-db.md
index 38cbe3f127f0cbddaac7c8eda6f9a448a549d8f6..f5cc4d84ccefa303bce99af21d3a8116895946ba 100644
--- a/.docs/api/metadata-db.md
+++ b/.docs/api/metadata-db.md
@@ -4,7 +4,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`bitnami/mariadb-galera:11.2.2-debian-11-r0`](https://hub.docker.com/r/bitnami/mariadb-galera)
+    Image: [`docker.io/bitnami/mariadb:11.1.3-debian-11-r6`](https://hub.docker.com/r/bitnami/mariadb)
 
     * Ports: 3306/tcp
     * JDBC: `jdbc://mariadb:<hostname>:3306`
@@ -23,11 +23,12 @@ services:
   dbrepo-metadata-db:
     ...
     volumes:
-      - /path/to/setup-some-data.sql:/docker-entrypoint-initdb.d/setup-some-data.sql
+      - /path/to/setup-schema.sql:/docker-entrypoint-initdb.d/1_setup-schema.sql
+      - /path/to/setup-data.sql:/docker-entrypoint-initdb.d/2_setup-data.sql
     ...
 ```
 
 !!! warning "Alphabetic Filename Sorting"
 
     Beware that the init script provided by Bitnami executes files in alphabetic order! For example: the file 
-    `setup-schema.sql` is executed **after** the file `setup-data.sql`!
\ No newline at end of file
+    `setup-schema.sql` is executed **after** the file `setup-data.sql`! Therefore, a numeric sorting prefix (1-9) is recommended!
\ No newline at end of file
diff --git a/.docs/api/metadata-service.md b/.docs/api/metadata-service.md
index 362a9c36bcf6a32ba8262d002716e108d998b2be..fa365219cc6d2c528197fc39354f5410e828166c 100644
--- a/.docs/api/metadata-service.md
+++ b/.docs/api/metadata-service.md
@@ -6,7 +6,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`dbrepo/metadata-service:__APPVERSION__`](https://hub.docker.com/r/dbrepo/metadata-service)
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/metadata-service:1.4.4`](https://hub.docker.com/r/dbrepo/metadata-service)
 
     * Ports: 9099/tcp
     * Info: `http://<hostname>:9099/actuator/info`
@@ -14,45 +14,39 @@ author: Martin Weise
         - Readiness: `http://<hostname>:9099/actuator/health/readiness`
         - Liveness: `http://<hostname>:9099/actuator/health/liveness`
     * Prometheus: `http://<hostname>:9099/actuator/prometheus`
-    * Swagger UI: `http://<hostname>:9099/swagger-ui/index.html` <a href="./swagger/metadata" target="_blank">:fontawesome-solid-square-up-right: view online</a>
+    * Swagger UI: `http://<hostname>:9099/swagger-ui/index.html`
 
 ## Overview
 
-This service manages the following topics:
+The Metadata Service manages metadata of identities, the [Broker Service](../broker-service) (i.e. obtaining queue
+types), semantic concepts (i.e. ontologies), relational metadata (databases, tables, queries, views) and identifiers.
 
-* Databases
-* Identifiers (DataCite, OAI-PMH)
-* Queries
-* Semantics (Ontologies)
-* Tables
-* Users
-* Views
+## Generation
 
-### Databases
+Most of the metadata available in DBRepo is generated automatically, leveraging the available information and taking
+the burden away from researchers, data stewards, etc. For example, the schema (names, constraints, data length) of
+generated tables and views is obtained from the `information_schema` database maintained by MariaDB internally.
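+
+For example, the column metadata the service reads can be inspected manually. A sketch assuming the compose defaults
+for the Data Database and a hypothetical user-generated database `mydb`:
+
+```python
+import pymysql
+
+# Compose defaults; the database name "mydb" is hypothetical.
+conn = pymysql.connect(host="data-db", port=3306, user="root", password="dbrepo")
+with conn.cursor() as cursor:
+    cursor.execute(
+        "SELECT column_name, data_type, character_maximum_length, is_nullable "
+        "FROM information_schema.columns WHERE table_schema = %s", ("mydb",))
+    for row in cursor.fetchall():
+        print(row)
+```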
 
-The service handles table operations inside a database. We use [Hibernate](https://hibernate.org/orm/) for schema and
-data ingest operations.
-
-### Identifiers
+## Identifiers
 
 The service is responsible for creating and resolving a *persistent identifier* (PID) attached to a database, subset,
 table or view to obtain the metadata attached to it and allow reproduction of the exact same result.
 
-This service also provides an OAI-PMH endpoint for metadata aggregators 
+This service also provides an OAI-PMH endpoint for metadata aggregators
 (e.g. [OpenAIRE Graph](https://graph.openaire.eu/)). Through the User Interface, it also exposes metadata through
 JSON-LD to metadata aggregators (e.g. [Google Datasets](https://datasetsearch.research.google.com/)). PID metadata
 is always exposed, even for private databases.
 
-The service generates internal PIDs, essentially representing internal URIs in 
-the [DataCite Metadata Schema 4.4](https://doi.org/10.14454/3w3z-sa82). This can be enhanced with activating the 
-external DataCite Fabrica system to generate DOIs, this is disabled by default. 
+The service generates internal PIDs, essentially representing internal URIs in
+the [DataCite Metadata Schema 4.4](https://doi.org/10.14454/3w3z-sa82). This can be enhanced by activating the
+external DataCite Fabrica system to generate DOIs; this is disabled by default.
 
 To activate DOI minting, pass your DataCite Fabrica credentials in the environment variables:
 
 ```yaml title="docker-compose.yml"
 services:
   dbrepo-metadata-service:
-    image: docker.io/dbrepo/metadata-service:1.4.0
+    image: registry.datalab.tuwien.ac.at/dbrepo/metadata-service:1.4.4
     environment:
       spring_profiles_active: doi
       DATACITE_URL: https://api.datacite.org
@@ -62,72 +56,13 @@ services:
   ...
 ```
 
-### Queries
-
-It provides an interface to insert data into the tables. It also allows for view-only, paginated and versioned query
-execution to the raw data. Any stale queries (query that have been executed by users in DBRepo but were not saved) are
-periodically being deleted from the query store based on the `DELETE_STALE_QUERIES_RATE` environment variable (defaults
-to 60 seconds).
-
-Executing SQL queries through the Query Endpoint must fulfill some restrictions:
+## Semantics
 
-* The SQL query does not contain at semicolon `;`
-
-### Semantics
-
-The service provides metadata to the table columns in the [Metadata Database](./system-databases-metadata) from
-registered ontologies like Wikidata [`wd:`](https://wikidata.org), Ontology of Units of
+The service provides metadata to the table columns in the [Metadata Database](../metadata-db) from registered
+ontologies like Wikidata [`wd:`](https://wikidata.org), Ontology of Units of
 Measurement [`om2:`](https://www.ontology-of-units-of-measure.org/resource/om-2), Friend of a
 Friend [`foaf:`](http://xmlns.com/foaf/0.1/), the [`prov:`](http://www.w3.org/ns/prov#) namespace, etc.
 
-### Tables
-
-The service manages tables in the [Data Database](./system-databases-data) and manages the metadata of these tables
-in the [Metadata Database](./system-databases-metadata). Any tables that are created outside of DBRepo (e.g. directly via the JDBC API) are
-periodically fetched by this service (based on the `OBTAIN_METADATA_RATE` environment variable, default interval is 60
-seconds).
-
-### Users
-
-The service manages users in the [Data Database](./system-databases-data)
-and [Metadata Database](./system-databases-metadata), as well as in the [Broker Service](./system-services-broker)
-and the [Authentication Service](./system-services-authentication).
-
-The default configuration grants the users only very basic permissions on the databases:
-
-* `SELECT`
-* `CREATE`
-* `CREATE VIEW`
-* `CREATE ROUTINE`
-* `CREATE TEMPORARY TABLES`
-* `LOCK TABLES`
-* `INDEX`
-* `TRIGGER`
-* `INSERT`
-* `UPDATE`
-* `DELETE`
-
-This configuration is passed as environment variable `GRANT_PRIVILEGES` to the service as comma-separated string. You
-can add/remove grants by setting this environment variable, e.g. allow the users to only select data and create
-temporary tables:
-
-```yaml title="docker-compose.yml"
-services:
-  dbrepo-metadata-service:
-    environment:
-      GRANT_PRIVILEGES=SELECT,CREATE TEMPORARY TABLES
-      ...
-```
-
-A list of all grants is available in the MariaDB documentation for [`GRANT`](https://mariadb.com/kb/en/grant/)
-
-### Views
-
-The service manages views in the [Data Database](./system-databases-data)
-and [Metadata Database](./system-databases-metadata). Any views that are created outside of DBRepo (e.g. directly via 
-the JDBC API) are periodically fetched by this service (based on the `OBTAIN_METADATA_RATE` environment variable,
-default interval is 60 seconds).
-
 ## Limitations
 
 * No support for other databases than [MariaDB](https://mariadb.org/) because of system-versioning capabilities missing
@@ -136,7 +71,7 @@ default interval is 60 seconds).
 !!! question "Do you miss functionality? Do these limitations affect you?"
 
     We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
+    in [contact](../../contact) with us; we happily answer requests for collaboration with attached CV and your programming
     experience!
 
 ## Security
diff --git a/.docs/api/python.md b/.docs/api/python.md
index a48fb53d8d474b9643d92f5e17449a92d2d1cd93..ab6b2b69a6b6ca9606232eca534686bc6c0b71ab 100644
--- a/.docs/api/python.md
+++ b/.docs/api/python.md
@@ -8,6 +8,13 @@ author: Martin Weise
 
 [:fontawesome-solid-cube: &nbsp;View Docs](../../python){ .md-button .md-button--primary }
 
+## Overview
+
+The DBRepo Python library uses some of the most popular and well-maintained Python packages for data scientists under
+the hood. For example: [`requests`](https://requests.readthedocs.io/) to interact with the HTTP API
+endpoints, [`pandas`](https://pandas.pydata.org/) for data operations and [`pydantic`](https://docs.pydantic.dev/) for
+representing the information sent to and received from the HTTP API.
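+
+A sketch of how these packages typically compose when talking to the HTTP API directly; the endpoint path and response
+shape are illustrative assumptions, not the library's actual interface:
+
+```python
+import pandas as pd
+import requests
+from pydantic import BaseModel
+
+# Hypothetical response model and endpoint -- for illustration only.
+class Database(BaseModel):
+    id: int
+    name: str
+
+response = requests.get("http://localhost/api/database")
+databases = [Database(**d) for d in response.json()]
+df = pd.DataFrame([d.model_dump() for d in databases])
+print(df)
+```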
+
 ## Installing
 
 :octicons-tag-16:{ title="Minimum version" } 1.4.2
diff --git a/.docs/api/search-service.md b/.docs/api/search-service.md
index fff317d6f8adc093cdf6f725bab31f5bbdb424e7..b48be919d6acec14bbfb3783c6e085f2bcf3e1e9 100644
--- a/.docs/api/search-service.md
+++ b/.docs/api/search-service.md
@@ -6,18 +6,17 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`dbrepo/search-service:__APPVERSION__`](https://hub.docker.com/r/dbrepo/search-service)
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/search-service:1.4.4`](https://hub.docker.com/r/dbrepo/search-service)
 
     * Ports: 4000/tcp
     * Health: `http://<hostname>:4000/api/search/health`
     * Prometheus: `http://<hostname>:4000/metrics`
-    * Swagger UI: `http://<hostname>:4000/swagger-ui/` <a href="../swagger/search" target="_blank">:fontawesome-solid-square-up-right: view online</a>
+    * Swagger UI: `http://<hostname>:4000/swagger-ui/`
 
 ## Overview
 
-This service communicates between the [Search Database](../system-databases-search) and 
-the [User Interface](../system-other-ui) to allow structured search of databases, tables, columns, users, identifiers,
-views, semantic concepts &amp; units of measurements used in databases.
+This service communicates between the Search Database and the [User Interface](../ui) to allow structured search of
+databases, tables, columns, users, identifiers, views, semantic concepts &amp; units of measurement used in databases.
 
 <figure markdown>
 ![Built-in search](../images/screenshots/feature-search.png){ .img-border }
@@ -26,9 +25,9 @@ views, semantic concepts &amp; units of measurements used in databases.
 
 ## Index
 
-There is only one 
+There is only one
 index [`database`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/dev/dbrepo-search-db/init/indices/database.json)
-that holds all the metadata information which is mirrored from the [Metadata Database](../system-databases-metadata).
+that holds all the metadata information which is mirrored from the [Metadata Database](../metadata-db).
 
 <figure markdown>
 ![Mirroring statistical properties in Metadata Database and Search Database](../images/statistics-mirror.png)
@@ -37,44 +36,16 @@ that holds all the metadata information which is mirrored from the [Metadata Dat
 
 ## Faceted Browsing
 
-This service enables the frontend to search the `database` index with eight different *types* of desired results 
+This service enables the frontend to search the `database` index with eight different *types* of desired results
 (database, table, column, view, identifier, user, concept, unit) and their *facets*.
 
-For example, the [User Interface](../system-other-ui) allows for the search of databases that contain a certain
-semantic concept (provided as URI, e.g. 
-temperature [http://www.wikidata.org/entity/Q11466](http://www.wikidata.org/entity/Q11466)) and unit of measurement 
-(provided as URI, e.g. degree 
+For example, the [User Interface](../ui) allows for the search of databases that contain a certain
+semantic concept (provided as URI, e.g.
+temperature [http://www.wikidata.org/entity/Q11466](http://www.wikidata.org/entity/Q11466)) and unit of measurement
+(provided as URI, e.g. degree
 Celsius [http://www.ontology-of-units-of-measure.org/resource/om-2/degreeCelsius](http://www.ontology-of-units-of-measure.org/resource/om-2/degreeCelsius)).
 
-An example on faceted browsing is found in the [usage examples](../usage-search).
-
-## Unit Independent Search
-
-Since the repository automatically collects statistical properties (min, max, mean, median, std.dev) in both the
-[Metadata Database](../system-databases-metadata) and the [Search Database](../system-databases-search), a special
-search can be performed when at least two columns have the same semantic concept (e.g. temperature) annotated and
-the units of measurements can be transformed.
-
-<figure markdown>
-![Two tables with compatible semantic concepts (Temperature) and units of measurement (left is in degree Celsius, right is in degree Fahrenheit)](../images/statistics-example.png)
-<figcaption>Figure 3: Two tables with compatible semantic concepts and units of measurement</figcaption>
-</figure>
-
-In short, the search service transforms the statistical properties not in the target unit of measurements is transformed
-by using the [`omlib`](https://github.com/dieudonneWillems/OMLib) package. 
-
-For example: a user wants to find datasets that contain *"temperature measurements between 0 - 10 &deg;C"*. Then the 
-search service transforms the query to the dataset on the right from &deg;F to contain *"temperature measurements
-between 32 - 50 &deg;F"* instead.
-
-<figure markdown>
-![Unit independent search query transformation](../images/statistics-example-unit-independent-search.png)
-<figcaption>Figure 4: Unit independent search query transformation</figcaption>
-</figure>
-
-## Examples
-
-View [usage examples](../usage-search/).
+An example of faceted browsing can be found in the [usage examples](..).
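+
+A hedged sketch querying the `database` index directly with
+[`opensearch-py`](https://pypi.org/project/opensearch-py/); the host, port and credentials are the compose defaults,
+while the concept field path is an assumption about the index mapping:
+
+```python
+from opensearchpy import OpenSearch
+
+# Compose defaults for the Search Database; the field path below is an
+# assumption about the "database" index mapping.
+client = OpenSearch(hosts=[{"host": "search-db", "port": 9200}],
+                    http_auth=("admin", "admin"))
+hits = client.search(index="database", body={
+    "query": {"match": {
+        "tables.columns.concept.uri": "http://www.wikidata.org/entity/Q11466"}}})
+print(hits["hits"]["total"])
+```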
 
 ## Limitations
 
@@ -86,4 +57,4 @@ View [usage examples](../usage-search/).
 
 ## Security
 
-(nothing)
+(none)
diff --git a/.docs/api/storage-service.md b/.docs/api/storage-service.md
index bf40ca83c8cfde0951a3796df0fbb06e0e486478..a8da4f0721f50cdbc515ca9b0ed240418529f269 100644
--- a/.docs/api/storage-service.md
+++ b/.docs/api/storage-service.md
@@ -6,7 +6,7 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`chrislusf/seaweedfs:3.59`](https://hub.docker.com/r/chrislusf/seaweedfs)
+    Image: [`docker.io/chrislusf/seaweedfs:3.59`](https://hub.docker.com/r/chrislusf/seaweedfs)
 
     * Ports: 9000/tcp
     * Prometheus: `http://<hostname>:9091/metrics`
@@ -36,7 +36,7 @@ The default configuration creates two buckets `dbrepo-upload`, `dbrepo-download`
 !!! question "Do you miss functionality? Do these limitations affect you?"
 
     We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
+    in [contact](../../contact) with us; we happily answer requests for collaboration with attached CV and your programming
     experience!
 
 ## Security
diff --git a/.docs/api/ui.md b/.docs/api/ui.md
index 2acc439097baa4374288c841e2976bb70e592e5c..d187772ce0d0e4c79e20565dd2e9732d051d0382 100644
--- a/.docs/api/ui.md
+++ b/.docs/api/ui.md
@@ -2,6 +2,14 @@
 author: Martin Weise
 ---
 
+## TL;DR
+
+!!! debug "Debug Information"
+
+    Image: [`registry.datalab.tuwien.ac.at/dbrepo/ui:1.4.4`](https://hub.docker.com/r/dbrepo/ui)
+
+    * Ports: 3000/tcp
+
 The User Interface is configured in the `runtimeConfig` section of the `nuxt.config.ts` file during build time. For the
 runtime, you need to override those values through environment variables or by mounting a `.env` file. As a small
 example, you can configure the logo :material-numeric-1-circle-outline: in Figure 2. Make sure you mount the logo as
@@ -27,7 +35,7 @@ if you use a Kubernetes deployment via ConfigMap and Volumes).
 ```yaml title="docker-compose.yml"
 services:
   dbrepo-ui:
-    image: docker.io/dbrepo/ui:__APPVERSION__
+    image: registry.datalab.tuwien.ac.at/dbrepo/ui:1.4.4
     volumes:
       - ./my_logo.png:/app/.output/public/my_logo.png
   ...
diff --git a/.docs/api/upload-service.md b/.docs/api/upload-service.md
index 88812d308ba856a1d7c77e65a1bf97298cb2e968..f8ad58ebcb1f626aa6064db682d3fffcd958c81a 100644
--- a/.docs/api/upload-service.md
+++ b/.docs/api/upload-service.md
@@ -6,46 +6,31 @@ author: Martin Weise
 
 !!! debug "Debug Information"
 
-    Image: [`tusproject/tusd:v1.12`](https://hub.docker.com/r/tusproject/tusd)
+    Image: [`docker.io/tusproject/tusd:v1.12`](https://hub.docker.com/r/tusproject/tusd)
 
     * Ports: 1080/tcp
     * Prometheus: `http://<hostname>:1080/api/upload/metrics`
     * API: `http://<hostname>:1080/api/upload`
-    * Swagger UI: <a href="../swagger/upload" target="_blank">:fontawesome-solid-square-up-right: view online</a>
 
 ## Overview
 
-We use the [TUS](https://tus.io/) open protocol for resumable file uploads which based entirely on HTTP. Even though
+We use the [TUS](https://tus.io/) open protocol for resumable file uploads, which is based entirely on HTTP. Even though
 the Upload Service is part of the standard installation, it is an entirely optional component and can be replaced with
 any S3-compatible Blob Storage.
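+
+A resumable upload can be performed with any TUS client, e.g. the Python
+[`tuspy`](https://pypi.org/project/tuspy/) package. A sketch; the endpoint path matches the debug information above,
+the file name is hypothetical:
+
+```python
+from tusclient import client
+
+# Endpoint from the debug information above; the file name is hypothetical.
+tus = client.TusClient("http://localhost:1080/api/upload")
+uploader = tus.uploader("data.csv", chunk_size=2 * 1024 * 1024)
+uploader.upload()  # uploads in chunks; can resume an interrupted upload
+print(uploader.url)  # location of the uploaded file
+```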
 
-### Settings
-
-The Upload Service is responsible for uploading files (mainly CSV-files) into a Blob Storage that can be accesses trough
-the S3 protocol (e.g. our [Storage Service](../system-services-storage)). Make sure that the Upload Service can be
-accessed from the Gateway Service and set the url in the User Interface configuration file.
-
-```json title="dbrepo.config.json"
-{
-    "upload": {
-       "url": "example.com",
-       "useSsl": true
-    },
-    ...
-}
-```
-
-If your deployment is secured with SSL/TLS (recommended) set the `useSsl` variable to `true`.
-
 ### Architecture
 
-The Upload Service communicates internally with the [Storage Service](../system-services-storage) (c.f. [Figure 1](#fig1)).
+The Upload Service communicates internally with the [Storage Service](../storage-service) (c.f. [Figure 1](#fig1)).
 
 <figure id="fig1" markdown>
 ![Architecture of the Upload Service](../images/architecture-upload-service.svg)
 <figcaption>Figure 1: Architecture of the Upload Service</figcaption>
 </figure>
 
+The Upload Service is responsible for uploading files (mainly CSV-files) into a Blob Storage that can be accessed
+through the S3 protocol (e.g. our [Storage Service](../storage-service)). Make sure that the Upload Service can be
+accessed from the Gateway Service.
+
 ## Limitations
 
 * No support for authentication.
diff --git a/.docs/concepts/search.md b/.docs/concepts/search.md
index 8ae41c80ba2566dce997eed8ac4052b0cfa23631..8731100f91246b2f56091af373962d30d76796e6 100644
--- a/.docs/concepts/search.md
+++ b/.docs/concepts/search.md
@@ -4,10 +4,34 @@ author: Martin Weise
 
 ## Index
 
-TBD
+TBD
 
 ## Document
 
 TBD
 
-## Query
\ No newline at end of file
+## Query
+
+## Unit Independent Search
+
+Since the repository automatically collects statistical properties (min, max, mean, median, std.dev) in both the
+[Metadata Database](../../api/metadata-db) and the Search Database, a special
+search can be performed when at least two columns have the same semantic concept (e.g. temperature) annotated and
+the units of measurement can be transformed.
+
+<figure markdown>
+![Two tables with compatible semantic concepts (Temperature) and units of measurement (left is in degree Celsius, right is in degree Fahrenheit)](../images/statistics-example.png)
+<figcaption>Figure 3: Two tables with compatible semantic concepts and units of measurement</figcaption>
+</figure>
+
+In short, the search service transforms the statistical properties that are not in the target unit of measurement
+by using the [`omlib`](https://github.com/dieudonneWillems/OMLib) package.
+
+For example: a user wants to find datasets that contain *"temperature measurements between 0 - 10 &deg;C"*. Then the
+search service transforms the query for the dataset on the right (stored in &deg;F) to *"temperature measurements
+between 32 - 50 &deg;F"* instead.
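+
+The transformation itself is plain arithmetic once the units are known to be compatible; a minimal illustration of the
+idea (not the `omlib` API):
+
+```python
+# Celsius -> Fahrenheit, applied to the query bounds of the example above.
+def celsius_to_fahrenheit(value: float) -> float:
+    return value * 9 / 5 + 32
+
+bounds = (celsius_to_fahrenheit(0), celsius_to_fahrenheit(10))
+print(bounds)  # (32.0, 50.0)
+```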
+
+<figure markdown>
+![Unit independent search query transformation](../images/statistics-example-unit-independent-search.png)
+<figcaption>Figure 4: Unit independent search query transformation</figcaption>
+</figure>
\ No newline at end of file
diff --git a/.docs/deployment-helm.md b/.docs/deployment-helm.md
deleted file mode 100644
index 5b0be43553584e6c6be4f582615bc9afcffd918a..0000000000000000000000000000000000000000
--- a/.docs/deployment-helm.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-author: Martin Weise
----
-
-[![Helm Chart version](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dbrepo)](https://artifacthub.io/packages/helm/dbrepo/dbrepo){ tabindex=-1 }
-
-## TL;DR
-
-To install DBRepo in your existing cluster, download the
-sample [`values.yaml`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-deployment/-/raw/master/charts/dbrepo-core/values.yaml?inline=false)
-for your deployment and update the variables, especially `hostname`.
-
-```shell
-helm upgrade --install dbrepo \
-  -n dbrepo \
-  "oci://s210.dl.hpc.tuwien.ac.at/dbrepo/helm/dbrepo" \
-  --values ./values.yaml \
-  --version "__CHARTVERSION__" \
-  --create-namespace \
-  --cleanup-on-fail
-```
-
-This chart is also on [Artifact Hub](https://artifacthub.io/packages/helm/dbrepo/dbrepo) with a full documentation
-about values, etc.
-
-## Prerequisites
-
-* Kubernetes 1.24+
-* Kubernetes 3.8.0+
-* PV provisioner support in the underlying infrastructure
-
-## Limitations
-
-1. MariaDB Galera does not (yet) support XA-transactions required by the authentication service (=Keycloak). Therefore
-   only a single MariaDB pod can be deployed at once for the [auth database](./system-databases-authentication).
-2. The entire Helm deployment is rootless (=`runAsNonRoot=true`) except for
-   the [Storage Service](./system-services-storage/) which still requires a root user.
-
-!!! question "Do you miss functionality? Do these limitations affect you?"
-
-    We strongly encourage you to help us implement it as we are welcoming contributors to open-source software and get
-    in [contact](./contact) with us, we happily answer requests for collaboration with attached CV and your programming 
-    experience!
diff --git a/.docs/index.md b/.docs/index.md
index a6cfdea09b3a8908b58d1ee0d6af38f5cece99e4..4b869b4d8fac9e7cf09ae7cd0e57dacaf077b411 100644
--- a/.docs/index.md
+++ b/.docs/index.md
@@ -5,6 +5,7 @@ author: Martin Weise
 [![CI/CD Pipeline](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/badges/master/pipeline.svg)](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services){ tabindex=-1 }
 [![Code Coverage](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/badges/master/coverage.svg)](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services){ tabindex=-1 }
 [![GitLab Release](https://img.shields.io/gitlab/v/release/fair-data-austria-db-repository%2Ffda-services?gitlab_url=https%3A%2F%2Fgitlab.phaidra.org&display_name=release&style=flat&cacheSeconds=3600)](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services){ tabindex=-1 }
+[![Image Pulls](https://img.shields.io/docker/pulls/dbrepo/data-service?style=flat&cacheSeconds=3600)](https://hub.docker.com/u/dbrepo){ tabindex=-1 }
 [![GitLab License](https://img.shields.io/gitlab/license/fair-data-austria-db-repository%2Ffda-services?gitlab_url=https%3A%2F%2Fgitlab.phaidra.org%2F&style=flat&cacheSeconds=3600)](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services){ tabindex=-1 }
 
 Documentation for version: [v1.4.4](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/releases).
diff --git a/.docs/deployment-docker-compose.md b/.docs/installation.md
similarity index 52%
rename from .docs/deployment-docker-compose.md
rename to .docs/installation.md
index 7b6d9922561ddeafee790da540e301b65e09f74c..2561b4944b65de05a6f79b55502ad2d61ad15906 100644
--- a/.docs/deployment-docker-compose.md
+++ b/.docs/installation.md
@@ -2,16 +2,20 @@
 author: Martin Weise
 ---
 
-# Docker Compose
+# Installation
+
+[![Image Pulls](https://img.shields.io/docker/pulls/dbrepo/data-service?style=flat&cacheSeconds=3600)](https://hub.docker.com/u/dbrepo){ tabindex=-1 }
 
 ## TL;DR
 
 If you have [Docker](https://docs.docker.com/engine/install/) already installed on your system, you can install DBRepo with:
 
 ```shell
-curl -sSL https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-__APPVERSION__/install.sh | bash
+curl -sSL https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-1.4.4/install.sh | bash
 ```
 
+Or perform a [custom install](#custom-install).
+
 ## Requirements
 
 ### Hardware
@@ -26,7 +30,7 @@ the following settings.
 
 *Optional*: public IP-address if you want to secure the deployment with a (free) TLS-certificate from Let's Encrypt.
 
-!!! tip "Resource Consumption"
+!!! info "Resource Consumption"
 
     Note that most of the vCPU and RAM resources will be needed for starting the infrastructure, this is because of
     Docker. During operation and especially idle times, the deployment will use significantly less resources.
@@ -34,13 +38,15 @@ the following settings.
 ### Software
 
 We only test the Docker Compose deployment with the 
-official [Docker engine](https://docs.docker.com/engine/install/debian/) installed on 
+official [Docker Engine](https://docs.docker.com/engine/install/debian/) installed on 
 a [Debian](https://www.debian.org/)-based operating system. Other software deployments (e.g. Docker Desktop on Windows)
 are *not* recommended and not tested.
 
-## Architecture
+## Custom Install
+
+TBD
 
-### Overview
+## Architecture
 
 The repository is designed as a service-based architecture to ensure scalability and the utilization of various
 technologies. The conceptualized microservices handle the basic database operations, data versioning as well as
@@ -51,9 +57,7 @@ technologies. The conceptualized microservices operate the basic database operat
 <figcaption>Architecture of the services deployed via Docker Compose</figcaption>
 </figure>
 
-### Notes
-
-Please note that we only save the state of the databases as well as the [Broker Service](./system-services-broker)
+Please note that we only persist the state of the databases and the [Broker Service](../broker-service)
 since RabbitMQ maintains state inside the container.
 
 ## Deployment
@@ -61,43 +65,6 @@ since RabbitMQ maintains state inside the container.
 We maintain a rapid prototype deployment option through Docker Compose (v2.17.0 and newer). This deployment creates the
 core infrastructure and a single Docker container for all user-generated databases.
 
-=== "Linux"
-
-    Download and install [Docker Engine](https://docs.docker.com/desktop/install/linux-install/) for your Linux
-    distribution. Although the installation might work, we *do not* recommend Docker Desktop.
-    
-    Ensure the Docker daemon is running at all times:
-
-        systemctl enable docker --now
-
-    Install DBRepo with the default configuration:
-
-        curl -sSL https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/dev/install.sh | bash
-
-=== "Windows"
-
-    Open `cmd.exe` as administrator and install WSL2 and the Debian subsystem:
-
-        wsl --install Debian
-
-    Open `optionalfeatures` by typing into the open terminal window or searching for it and enable "Windows Subsystem 
-    for Linux":
-
-    <figure markdown>
-    ![Data ingest](images/optionalfeatures.png){ .img-border }
-       <figcaption>Enable Subsystem for Linux in Windows Features</figcaption>
-    </figure>
-
-    Install [Docker Desktop](https://docs.docker.com/desktop/install/windows-install/) on the Windows host machine.
-    Open Docker Desktop and go to settings (:fontawesome-solid-gear:) > General > Tick "Use WSL2 based engine" if not
-    already ticked.
-
-    Open the Debian container by typing "Debian" into the search, you should see a terminal window.
-
-    Install DBRepo with the default configuration from the Debian container:
-
-        curl -sSL https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/master/install.sh | bash
-
 View the logs:
 
     docker compose logs -f
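+
+Stop the deployment again with (named volumes, and therefore your data, are preserved):
+
+    docker compose down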
@@ -147,50 +114,11 @@ Please be warned that the default configuration is not intended for public deplo
 running system within minutes to play around with the system and explore its features. It is strongly advised to change 
 the default `.env` environment variables.
 
-Next, create a [user account](./usage-overview/#create-user-account) and 
-then [create a database](./usage-overview/#create-database) to [import a dataset](./usage-overview/#import-dataset).
-
-## Security
-
-!!! warning "Known security issues with the default configuration"
-
-    The system is auto-configured for a small, local, test deployment and is *not* secure! You need to make modifications
-    in various places to make it secure:
-
-    * **Authentication Service**:
-
-        a. You need to use your own instance or configure a secure instance using a (self-signed) certificate.
-           Additionally, when serving from a non-default Authentication Service, you need to put it into the 
-           `JWT_ISSUER` environment variable (`.env`).
-
-        b. You need to change the default admin user `fda` password in Realm
-           master > Users > fda > Credentials > Reset password.
-
-        c. You need to change the client secrets for the clients `dbrepo-client` and `broker-client`. Do this in Realm
-           dbrepo > Clients > dbrepo-client > Credentials > Client secret > Regenerate. Do the same for the
-           broker-client.
-
-        d. You need to regenerate the public key of the `RS256` algorithm which is shared with all services to verify 
-           the signature of JWT tokens. Add your securely generated private key in Realm 
-           dbrepo > Realm settings > Keys > Providers > Add provider > rsa.
-
-    * **Broker Service**: by default, this service is configured with an administrative user that has major privileges.
-      You need to change the password of the user *fda* in Admin > Update this user > Password. We found this
-      [simple guide](https://onlinehelp.coveo.com/en/ces/7.0/administrator/changing_the_rabbitmq_administrator_password.htm)
-      to be very useful.
-
-    * **Search Database**: by default, this service is configured to require authentication with an administrative user
-      that is allowed to write into the indizes. Following
-      this [simple guide](https://www.elastic.co/guide/en/elasticsearch/reference/8.7/reset-password.html), this can be
-      achieved using the command line.
-
-    * **Gateway Service**: by default, no HTTPS is used that protects the services behind. You need to provide a trusted
-      SSL/TLS certificate in the configuration file or use your own proxy in front of the Gateway Service. See this
-      [simple guide](http://nginx.org/en/docs/http/configuring_https_servers.html) on how to install a SSL/TLS
-      certificate on NGINX.
+Next, create a [user account](../api/#create-user-account) and 
+then [create a database](../api/#create-database) to [import a dataset](../api/#import-dataset).
 
 ## Limitations
 
 !!! info "Alternative Deployments"
 
-    Alternatively, you can also deploy DBRepo with [Helm](./deployment-helm/) in your virtual machine instead.
+    Alternatively, you can deploy DBRepo with [Kubernetes](../deployment-helm) in your virtual machine.
diff --git a/.docs/kubernetes.md b/.docs/kubernetes.md
new file mode 100644
index 0000000000000000000000000000000000000000..608502e320b56ac829d92b781ea19c8128b927c5
--- /dev/null
+++ b/.docs/kubernetes.md
@@ -0,0 +1,70 @@
+---
+author: Martin Weise
+---
+
+# Kubernetes
+
+[![Helm Chart version](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/dbrepo)](https://artifacthub.io/packages/helm/dbrepo/dbrepo){ tabindex=-1 }
+
+## TL;DR
+
+To install DBRepo in your existing cluster, download the
+sample [`values.yaml`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/release-1.4.4/helm/dbrepo/values.yaml)
+for your deployment and update the variables, especially `hostname`.
+
+```shell
+helm upgrade --install dbrepo \
+  -n dbrepo \
+  "oci://registry.datalab.tuwien.ac.at/dbrepo/helm/dbrepo" \
+  --values ./values.yaml \
+  --version "1.4.4" \
+  --create-namespace \
+  --cleanup-on-fail
+```
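+
+You can then verify that the release is up, e.g. with:
+
+```shell
+# list the release and watch the pods start
+helm list -n dbrepo
+kubectl get pods -n dbrepo -w
+```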
+
+This chart is also listed on [Artifact Hub](https://artifacthub.io/packages/helm/dbrepo/dbrepo) with full documentation
+of the values, etc. Before installing, you need to change the default credentials, e.g. the Broker Service administrator
+password:
+
+```yaml title="values.yaml"
+brokerservice:
+  ...
+  auth:
+    ...
+    username: broker
+    password: broker
+    passwordHash: 1gwjNNTBPKLgyzbsUykfR0JIFC6nNqbNJaxzZ14uPT8JGcTZ
+```
+
+The `brokerservice.auth.passwordHash` field is the RabbitMQ SHA-512 hash of the `brokerservice.auth.password` field and
+can be obtained with
+the [`generate-rabbitmq-pw.sh`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/release-1.4.4/helm/dbrepo/hack/generate-rabbitmq-pw.sh)
+script:
+
+```console
+$ ./generate-rabbitmq-pw.sh my_password
+klPdmv4dgnRH64czHolIHAfXvc0G9hc24FQmPlI6eeI1NOf9
+```
+
+The script needs the `xxd` package to generate the random salt. If you don't have `xxd` installed, install it:
+
+* Debian/Ubuntu: `apt install xxd`
+* Windows: `choco install xxd`
+* macOS: `brew install coreutils`
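+
+For illustration only, the following is a minimal sketch of what the script computes, assuming RabbitMQ's documented
+credential scheme with SHA-512 (a 4-byte random salt is prepended to the UTF-8 password, the concatenation is hashed,
+and the Base64 of salt plus digest is stored); the shipped script remains the authoritative implementation:
+
+```shell
+# illustration of the salted-hash scheme; use generate-rabbitmq-pw.sh for real deployments
+PASSWORD=my_password
+SALT=$(openssl rand -hex 4)    # 4 random salt bytes, hex-encoded
+# hash the raw salt bytes concatenated with the password
+DIGEST=$({ printf '%s' "$SALT" | xxd -r -p; printf '%s' "$PASSWORD"; } | sha512sum | cut -d' ' -f1)
+# store Base64(salt || digest)
+printf '%s%s' "$SALT" "$DIGEST" | xxd -r -p | base64 -w0
+```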
+
+## Prerequisites
+
+* Kubernetes 1.24+
+* Helm 3.8.0+
+* PV provisioner support in the underlying infrastructure
+
+## Limitations
+
+1. MariaDB Galera does not (yet) support the XA transactions required by the authentication service (=Keycloak).
+   Therefore, only a single MariaDB pod can be deployed at once for the Auth database.
+2. The entire Helm deployment is rootless (=`runAsNonRoot=true`) except for
+   the [Storage Service](../api/storage-service), which still requires a root user.
+
+!!! question "Do you miss functionality? Do these limitations affect you?"
+
+    We strongly encourage you to help us implement it, as we welcome contributors to open-source software. Get
+    in [contact](../../contact) with us; we happily answer requests for collaboration that include a CV and your
+    programming experience!
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d2552eb0549be5b56435d025754101b8260a2401..f969aa04ac69a7817a367d0a2daf8817fd200e53 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -121,10 +121,11 @@ build-helm:
     refs:
       - /^release-.*/
   before_script:
+    - echo "$CI_GPG_KEYRING" | base64 -d > ./secring.gpg
     - echo "$CI_REGISTRY_PASSWORD" | docker login --username "$CI_REGISTRY_USER" --password-stdin $CI_REGISTRY_URL
   script:
     - apk add sed helm curl
-    - helm package ./helm/dbrepo --destination ./build
+    - helm package ./helm/dbrepo --sign --key 'Martin Weise' --keyring ./secring.gpg --destination ./build
 
 verify-install-script:
   image: docker.io/docker:24-dind
@@ -581,14 +582,16 @@ release-images:
     refs:
       - /^release-.*/
   before_script:
-    - "echo ${CI_REGISTRY_PASSWORD} | docker login --username ${CI_REGISTRY_USER} --password-stdin $CI_REGISTRY_URL"
-    - "echo ${CI_REGISTRY2_PASSWORD} | docker login --username ${CI_REGISTRY2_USER} --password-stdin $CI_REGISTRY2_URL"
+    - "docker logout ${CI_REGISTRY_URL}"
+    - "echo ${CI_REGISTRY_PASSWORD} | docker login --username ${CI_REGISTRY_USER} --password-stdin ${CI_REGISTRY_URL}"
+    - "docker logout ${CI_REGISTRY2_URL}"
+    - "echo ${CI_REGISTRY2_PASSWORD} | docker login --username ${CI_REGISTRY2_USER} --password-stdin ${CI_REGISTRY2_URL}"
     - "ifconfig eth0 mtu 1450 up"
     - "apk add make bash"
   script:
     - "make release-images"
 
-release-chart:
+release-helm:
   stage: release
   image: docker:24-dind
   only:
@@ -598,12 +601,16 @@ release-chart:
     refs:
       - release-latest
   before_script:
-    - "echo ${CI_REGISTRY2_PASSWORD} | docker login --username ${CI_REGISTRY2_USER} --password-stdin $CI_REGISTRY2_URL"
+    - "docker logout ${CI_REGISTRY_URL}"
+    - "echo ${CI_REGISTRY_PASSWORD} | docker login --username ${CI_REGISTRY_USER} --password-stdin ${CI_REGISTRY_URL}"
+    - "docker logout ${CI_REGISTRY2_URL}"
+    - "echo ${CI_REGISTRY2_PASSWORD} | docker login --username ${CI_REGISTRY2_USER} --password-stdin ${CI_REGISTRY2_URL}"
     - "apk add sed helm curl"
-    - "helm package ./helm/dbrepo --destination ./build"
+    - echo "$CI_GPG_KEYRING" | base64 -d > ./secring.gpg
+    - helm package ./helm/dbrepo --sign --key 'Martin Weise' --keyring ./secring.gpg --destination ./build
     - "helm plugin install https://github.com/sigstore/helm-sigstore"
   script:
-    - "helm push ./build/dbrepo-${CHART_VERSION}.tgz oci://${CI_REGISTRY2_URL}/helm"
+    - "helm sigstore upload ./build/dbrepo-${CHART_VERSION}.tgz oci://${CI_REGISTRY2_URL}/helm"
 
 release-docs:
   stage: release
@@ -612,11 +619,9 @@ release-docs:
     refs:
       - /^release-.*/
   before_script:
-    - "wget https://github.com/mikefarah/yq/releases/download/v4.2.0/yq_linux_amd64 -O /usr/bin/yq"
-    - "chmod +x /usr/bin/yq"
     - "apk add --update alpine-sdk bash sed wget openssh"
     - "pip install -r ./requirements.txt"
-    - "mkdir -p ./final/${APP_VERSION}/swagger"
+    - "mkdir -p ./final/${APP_VERSION}/rest"
   script:
     - "make gen-lib-doc gen-docs-doc"
     - "cp -r ./lib/python/docs/build/html ./final/${APP_VERSION}/python" # sphinx
@@ -633,7 +638,7 @@ release-docs:
     - "scp -oHostKeyAlgorithms=+ssh-rsa -oPubkeyAcceptedAlgorithms=+ssh-rsa final.tar.gz $CI_DOC_USER@$CI_DOC_IP:final.tar.gz"
     - "scp -oHostKeyAlgorithms=+ssh-rsa -oPubkeyAcceptedAlgorithms=+ssh-rsa versions.json $CI_DOC_USER@$CI_DOC_IP:/system/user/ifs/infrastructures/public_html/dbrepo/versions.json"
     - "scp -oHostKeyAlgorithms=+ssh-rsa -oPubkeyAcceptedAlgorithms=+ssh-rsa .docs/redirect.html $CI_DOC_USER@$CI_DOC_IP:/system/user/ifs/infrastructures/public_html/dbrepo/index.html"
-    - "ssh -oHostKeyAlgorithms=+ssh-rsa -oPubkeyAcceptedAlgorithms=+ssh-rsa $CI_DOC_USER@$CI_DOC_IP 'rm -rf /system/user/ifs/infrastructures/public_html/dbrepo/${APP_VERSION}; tar xzf ./final.tar.gz; rm -f ./final.tar.gz; cp -r ./final/* /system/user/ifs/infrastructures/public_html/dbrepo/${APP_VERSION}; rm -rf ./final'"
+    - 'ssh -oHostKeyAlgorithms=+ssh-rsa -oPubkeyAcceptedAlgorithms=+ssh-rsa $CI_DOC_USER@$CI_DOC_IP "rm -rf /system/user/ifs/infrastructures/public_html/dbrepo/${APP_VERSION}; tar xzf ./final.tar.gz; rm -f ./final.tar.gz; cp -r ./final/* /system/user/ifs/infrastructures/public_html/dbrepo/${APP_VERSION}; rm -rf ./final"'
 
 release-libs:
   stage: release
diff --git a/Makefile b/Makefile
index ea9a9d0ce3f29abf801f4a7769ad788259d33629..d3438ab7d5cfa78cfc7a5b829c37b917c14a42a9 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 
 APP_VERSION ?= 1.4.4
 CHART_VERSION ?= 1.4.4
-REPOSITORY_URL ?= docker.io/dbrepo
+REPOSITORY_URL ?= registry.datalab.tuwien.ac.at/dbrepo
 
 .PHONY: all
 all: help
diff --git a/dbrepo-analyse-service/Pipfile.lock b/dbrepo-analyse-service/Pipfile.lock
index 2ff8fb46f4e99f2f0b27aea880d3d3b7779bfabb..9a4ce182e44f3a2d2decb81b875d0ba268ab27ba 100644
--- a/dbrepo-analyse-service/Pipfile.lock
+++ b/dbrepo-analyse-service/Pipfile.lock
@@ -167,19 +167,19 @@
         },
         "boto3": {
             "hashes": [
-                "sha256:42b140fc850cf261ee4b1e8ef527fa071b1f1592a6d6a68d34b29f37cc46b4dd",
-                "sha256:56bec52d485d5670ce96d53ae7b2cd4ae4e8a705fb2298a21093cdd77d642331"
+                "sha256:7e8418b47dd43954a9088d504541bed8a42b6d06e712d02befba134c1c4d7c6d",
+                "sha256:7f676daef674fe74f34ce4063228eccc6e60c811f574720e31f230296c4bf29a"
             ],
             "index": "pypi",
-            "version": "==1.34.123"
+            "version": "==1.34.126"
         },
         "botocore": {
             "hashes": [
-                "sha256:8c34ada2a708c82e7174bff700611643db7ce2cb18f1130c35045c24310d299d",
-                "sha256:a8577f6574600c4d159b5cd103ee05744a443d77f7778304e17307940b369c4f"
+                "sha256:7a8ccb6a7c02456757a984a3a44331b6f51c94cb8b9b287cd045122fd177a4b0",
+                "sha256:7eff883c638fe30e0b036789df32d851e093d12544615a3b90062b42ac85bdbc"
             ],
             "markers": "python_version >= '3.8'",
-            "version": "==1.34.123"
+            "version": "==1.34.126"
         },
         "certifi": {
             "hashes": [
@@ -1050,11 +1050,11 @@
         },
         "pydantic": {
             "hashes": [
-                "sha256:c46c76a40bb1296728d7a8b99aa73dd70a48c3510111ff290034f860c99c419e",
-                "sha256:ea91b002777bf643bb20dd717c028ec43216b24a6001a280f83877fd2655d0b4"
+                "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52",
+                "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"
             ],
             "index": "pypi",
-            "version": "==2.7.3"
+            "version": "==2.7.4"
         },
         "pydantic-core": {
             "hashes": [
diff --git a/dbrepo-analyse-service/lib/dbrepo-1.4.4-py3-none-any.whl b/dbrepo-analyse-service/lib/dbrepo-1.4.4-py3-none-any.whl
index 503cfef91315990bbf06027d6de14c8b3184507b..7e8fd7fca5aa6158bf57952f7f1050a08b331402 100644
Binary files a/dbrepo-analyse-service/lib/dbrepo-1.4.4-py3-none-any.whl and b/dbrepo-analyse-service/lib/dbrepo-1.4.4-py3-none-any.whl differ
diff --git a/dbrepo-analyse-service/lib/dbrepo-1.4.4.tar.gz b/dbrepo-analyse-service/lib/dbrepo-1.4.4.tar.gz
index 9a90176f0a093b05d89d1cd74cf701cd0730861a..3e45d4513a31a1fda334ed9ad2c5cbad3803199a 100644
Binary files a/dbrepo-analyse-service/lib/dbrepo-1.4.4.tar.gz and b/dbrepo-analyse-service/lib/dbrepo-1.4.4.tar.gz differ
diff --git a/dbrepo-data-service/pom.xml b/dbrepo-data-service/pom.xml
index 3df58f676f876a8729e6485eef9291717404a242..3d3182bb231a8c0bb82c7bd4a8b99cc076ba20da 100644
--- a/dbrepo-data-service/pom.xml
+++ b/dbrepo-data-service/pom.xml
@@ -30,26 +30,6 @@
             <email>martin.weise@tuwien.ac.at</email>
             <organization>TU Wien</organization>
         </developer>
-        <developer>
-            <name>Moritz Staudinger</name>
-            <email>moritz.staudinger@tuwien.ac.at</email>
-            <organization>TU Wien</organization>
-        </developer>
-        <developer>
-            <name>Tobias Grantner</name>
-            <email>tobias.grantner@tuwien.ac.at</email>
-            <organization>TU Wien</organization>
-        </developer>
-        <developer>
-            <name>Sotirios Tsepelakis</name>
-            <email>sotirios.tsepelakis@tuwien.ac.at</email>
-            <organization>TU Wien</organization>
-        </developer>
-        <developer>
-            <name>Geoffrey Karnbach</name>
-            <email>geoffrey.karnbach@tuwien.ac.at</email>
-            <organization>TU Wien</organization>
-        </developer>
     </developers>
 
     <properties>
@@ -176,6 +156,11 @@
             <artifactId>commons-validator</artifactId>
             <version>${commons-validator.version}</version>
         </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.datatype</groupId>
+            <artifactId>jackson-datatype-hibernate6</artifactId>
+            <version>${jackson-datatype.version}</version>
+        </dependency>
         <!-- Authentication -->
         <dependency>
             <groupId>com.auth0</groupId>
diff --git a/dbrepo-data-service/rest-service/src/main/java/at/tuwien/endpoints/AccessEndpoint.java b/dbrepo-data-service/rest-service/src/main/java/at/tuwien/endpoints/AccessEndpoint.java
index 95332db436c0c2ee56ebb3c91b090fe2b4144e8a..4966e008424aa6e1f290c70b29e97d4b55e72273 100644
--- a/dbrepo-data-service/rest-service/src/main/java/at/tuwien/endpoints/AccessEndpoint.java
+++ b/dbrepo-data-service/rest-service/src/main/java/at/tuwien/endpoints/AccessEndpoint.java
@@ -8,7 +8,6 @@ import at.tuwien.api.user.UserDto;
 import at.tuwien.exception.*;
 import at.tuwien.gateway.MetadataServiceGateway;
 import at.tuwien.service.AccessService;
-import io.micrometer.observation.annotation.Observed;
 import io.swagger.v3.oas.annotations.Operation;
 import io.swagger.v3.oas.annotations.media.Content;
 import io.swagger.v3.oas.annotations.media.Schema;
diff --git a/dbrepo-data-service/rest-service/src/test/java/at/tuwien/service/SchemaServiceIntegrationTest.java b/dbrepo-data-service/rest-service/src/test/java/at/tuwien/service/SchemaServiceIntegrationTest.java
index db00195e049295bfd4b054f831ffd2c50f17f01c..cc644769272b4697cbe6091284287690b10c1944 100644
--- a/dbrepo-data-service/rest-service/src/test/java/at/tuwien/service/SchemaServiceIntegrationTest.java
+++ b/dbrepo-data-service/rest-service/src/test/java/at/tuwien/service/SchemaServiceIntegrationTest.java
@@ -59,7 +59,7 @@ public class SchemaServiceIntegrationTest extends AbstractUnitTest {
     }
 
     @Test
-    public void inspectTable_succeeds() throws TableNotFoundException, SQLException, QueryMalformedException {
+    public void inspectTable_succeeds() throws TableNotFoundException, SQLException {
 
         /* test */
         final TableDto response = schemaService.inspectTable(DATABASE_1_PRIVILEGED_DTO, "not_in_metadata_db");
@@ -95,7 +95,7 @@ public class SchemaServiceIntegrationTest extends AbstractUnitTest {
     }
 
     @Test
-    public void inspectTableEnum_succeeds() throws TableNotFoundException, SQLException, QueryMalformedException {
+    public void inspectTableEnum_succeeds() throws TableNotFoundException, SQLException {
 
         /* test */
         final TableDto response = schemaService.inspectTable(DATABASE_2_PRIVILEGED_DTO, "experiments");
@@ -135,7 +135,7 @@ public class SchemaServiceIntegrationTest extends AbstractUnitTest {
     }
 
     @Test
-    public void inspectTableFullConstraints_succeeds() throws TableNotFoundException, SQLException, QueryMalformedException {
+    public void inspectTableFullConstraints_succeeds() throws TableNotFoundException, SQLException {
 
         /* test */
         final TableDto response = schemaService.inspectTable(DATABASE_1_PRIVILEGED_DTO, "weather_aus");
@@ -241,7 +241,7 @@ public class SchemaServiceIntegrationTest extends AbstractUnitTest {
     }
 
     @Test
-    public void inspectTable_multipleForeignKeyReferences_succeeds() throws TableNotFoundException, SQLException, QueryMalformedException {
+    public void inspectTable_multipleForeignKeyReferences_succeeds() throws TableNotFoundException, SQLException {
 
         /* test */
         final TableDto response = schemaService.inspectTable(DATABASE_1_PRIVILEGED_DTO, "complex_foreign_keys");
@@ -294,7 +294,7 @@ public class SchemaServiceIntegrationTest extends AbstractUnitTest {
     }
 
     @Test
-    public void inspectTable_multiplePrimaryKey_succeeds() throws TableNotFoundException, SQLException, QueryMalformedException {
+    public void inspectTable_multiplePrimaryKey_succeeds() throws TableNotFoundException, SQLException {
 
         /* test */
         final TableDto response = schemaService.inspectTable(DATABASE_1_PRIVILEGED_DTO, "complex_primary_key");
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/config/JacksonConfig.java b/dbrepo-data-service/services/src/main/java/at/tuwien/config/JacksonConfig.java
new file mode 100644
index 0000000000000000000000000000000000000000..d6a30b52508a71c5ff9f3ad33b7e90f8846caa90
--- /dev/null
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/config/JacksonConfig.java
@@ -0,0 +1,29 @@
+package at.tuwien.config;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.SerializationFeature;
+import com.fasterxml.jackson.datatype.hibernate6.Hibernate6Module;
+import com.fasterxml.jackson.datatype.jdk8.Jdk8Module;
+import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+import java.util.TimeZone;
+
+@Slf4j
+@Configuration
+public class JacksonConfig {
+
+    @Bean
+    public ObjectMapper objectMapper() {
+        final ObjectMapper objectMapper = new ObjectMapper();
+        objectMapper.registerModule(new Jdk8Module());
+        objectMapper.registerModule(new JavaTimeModule());
+        objectMapper.registerModule(new Hibernate6Module()); /* serialize Hibernate lazy-loaded associations safely on REST endpoints */
+        objectMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
+        objectMapper.setTimeZone(TimeZone.getTimeZone("UTC"));
+        return objectMapper;
+    }
+
+}
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/DataMapper.java b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/DataMapper.java
index 1516d698bdf0fbe1d702abfb32ff23b9303ac98c..163ec5940fb36d2a228f61a745773cbdea9f0a8f 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/DataMapper.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/DataMapper.java
@@ -1,23 +1,80 @@
 package at.tuwien.mapper;
 
+import at.tuwien.api.container.image.ImageDateDto;
+import at.tuwien.api.database.DatabaseDto;
+import at.tuwien.api.database.ViewColumnDto;
+import at.tuwien.api.database.ViewDto;
+import at.tuwien.api.database.query.QueryDto;
+import at.tuwien.api.database.query.QueryResultDto;
+import at.tuwien.api.database.table.TableBriefDto;
 import at.tuwien.api.database.table.TableDto;
+import at.tuwien.api.database.table.TableHistoryDto;
+import at.tuwien.api.database.table.TableStatisticDto;
+import at.tuwien.api.database.table.columns.ColumnBriefDto;
 import at.tuwien.api.database.table.columns.ColumnDto;
+import at.tuwien.api.database.table.columns.ColumnStatisticDto;
 import at.tuwien.api.database.table.columns.ColumnTypeDto;
+import at.tuwien.api.database.table.constraints.ConstraintsDto;
+import at.tuwien.api.database.table.constraints.foreign.ForeignKeyBriefDto;
+import at.tuwien.api.database.table.constraints.foreign.ForeignKeyDto;
+import at.tuwien.api.database.table.constraints.foreign.ForeignKeyReferenceDto;
+import at.tuwien.api.database.table.constraints.foreign.ReferenceTypeDto;
+import at.tuwien.api.database.table.constraints.primary.PrimaryKeyDto;
+import at.tuwien.api.database.table.constraints.unique.UniqueDto;
+import at.tuwien.config.QueryConfig;
+import at.tuwien.exception.QueryNotFoundException;
+import at.tuwien.exception.TableNotFoundException;
+import com.github.dockerjava.zerodep.shaded.org.apache.commons.codec.binary.Hex;
+import com.google.common.hash.Hashing;
+import net.sf.jsqlparser.JSQLParserException;
+import net.sf.jsqlparser.parser.CCJSqlParserManager;
+import net.sf.jsqlparser.schema.Column;
+import net.sf.jsqlparser.statement.select.*;
+import org.jetbrains.annotations.NotNull;
 import org.mapstruct.Mapper;
+import org.mapstruct.Mapping;
+import org.mapstruct.Mappings;
 import org.testcontainers.shaded.org.apache.commons.io.FileUtils;
 
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.StringReader;
+import java.math.BigInteger;
+import java.nio.charset.StandardCharsets;
 import java.sql.*;
-import java.util.Map;
+import java.sql.Date;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.format.DateTimeFormatter;
+import java.time.format.DateTimeFormatterBuilder;
+import java.util.*;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 @Mapper(componentModel = "spring")
 public interface DataMapper {
 
     org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(DataMapper.class);
 
+    DateTimeFormatter mariaDbFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss[.SSSSSS]")
+            .withZone(ZoneId.of("UTC"));
+
+    /* redundant */
+    ColumnBriefDto columnDtoToColumnBriefDto(ColumnDto data);
+
+    /* redundant */
+    @Mappings({
+            @Mapping(target = "databaseId", source = "tdbid")
+    })
+    TableBriefDto tableDtoToTableBriefDto(TableDto data);
+
+    /* redundant */
+    ColumnDto viewColumnDtoToColumnDto(ViewColumnDto data);
+
+    ForeignKeyBriefDto foreignKeyDtoToForeignKeyBriefDto(ForeignKeyDto data);
+
     default String rabbitMqTupleToInsertOrUpdateQuery(TableDto table, Map<String, Object> data) {
         /* parameterized query for prepared statement */
         final StringBuilder statement = new StringBuilder("INSERT INTO `")
@@ -37,6 +94,579 @@ public interface DataMapper {
         return statement.toString();
     }
 
+    /**
+     * Map one row of the inspected schema metadata to a new view of the given database.
+     * @param database The database.
+     * @param resultSet The inspected schema metadata.
+     * @return The mapped view.
+     * @throws SQLException The result set could not be read.
+     */
+    default ViewDto schemaResultSetToView(DatabaseDto database, ResultSet resultSet) throws SQLException {
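+        /* result-set layout assumed from the calling information-schema query: column 1 = view name, column 9 = view definition */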
+        return ViewDto.builder()
+                .name(resultSet.getString(1))
+                .internalName(resultSet.getString(1))
+                .vdbid(database.getId())
+                .database(database)
+                .isInitialView(false)
+                .isPublic(database.getIsPublic())
+                .query(resultSet.getString(9))
+                .queryHash(Hashing.sha256()
+                        .hashString(resultSet.getString(9), StandardCharsets.UTF_8)
+                        .toString())
+                .columns(new LinkedList<>())
+                .identifiers(new LinkedList<>())
+                .creator(database.getOwner())
+                .createdBy(database.getOwner().getId())
+                .build();
+    }
+
+    default TableStatisticDto resultSetToTableStatistic(ResultSet data) throws SQLException {
+        final TableStatisticDto statistic = TableStatisticDto.builder()
+                .columns(new LinkedHashMap<>())
+                .build();
+        while (data.next()) {
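+            /* each row describes one column: 1 = name, 2 = min, 3 = max, 4 = median, 5 = mean, 6 = standard deviation */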
+            final ColumnStatisticDto columnStatistic = ColumnStatisticDto.builder()
+                    .min(data.getBigDecimal(2))
+                    .max(data.getBigDecimal(3))
+                    .median(data.getBigDecimal(4))
+                    .mean(data.getBigDecimal(5))
+                    .stdDev(data.getBigDecimal(6))
+                    .build();
+            statistic.getColumns().put(data.getString(1), columnStatistic);
+        }
+        return statistic;
+    }
+
+    default TableDto resultSetToTable(ResultSet resultSet, TableDto table, QueryConfig queryConfig) throws SQLException {
+        final ColumnDto column = ColumnDto.builder()
+                .ordinalPosition(resultSet.getInt(1) - 1) /* start at zero */
+                .autoGenerated(resultSet.getString(2) != null && resultSet.getString(2).startsWith("nextval"))
+                .isNullAllowed(resultSet.getString(3).equals("YES"))
+                .columnType(ColumnTypeDto.valueOf(resultSet.getString(4).toUpperCase()))
+                .d(resultSet.getString(7) != null ? resultSet.getLong(7) : null)
+                .name(resultSet.getString(10))
+                .internalName(resultSet.getString(10))
+                .table(table)
+                .tableId(table.getId())
+                .databaseId(table.getTdbid())
+                .description(resultSet.getString(11))
+                .build();
+        if (column.getColumnType().equals(ColumnTypeDto.ENUM)) {
+            column.setEnums(Arrays.stream(resultSet.getString(8)
+                            .substring(0, resultSet.getString(8).length() - 1)
+                            .replace("enum(", "")
+                            .split(","))
+                    .map(value -> value.replace("'", ""))
+                    .toList());
+        }
+        if (column.getColumnType().equals(ColumnTypeDto.SET)) {
+            column.setSets(Arrays.stream(resultSet.getString(8)
+                            .substring(0, resultSet.getString(8).length() - 1)
+                            .replace("set(", "")
+                            .split(","))
+                    .map(value -> value.replace("'", ""))
+                    .toList());
+        }
+        /* constraints */
+        if (resultSet.getString(9) != null && resultSet.getString(9).equals("PRI")) {
+            table.getConstraints().getPrimaryKey().add(PrimaryKeyDto.builder()
+                    .table(tableDtoToTableBriefDto(table))
+                    .column(columnDtoToColumnBriefDto(column))
+                    .build());
+        }
+        /* fix boolean and set size for others */
+        if (resultSet.getString(8).equalsIgnoreCase("tinyint(1)")) {
+            column.setColumnType(ColumnTypeDto.BOOL);
+        } else if (resultSet.getString(5) != null) {
+            column.setSize(resultSet.getLong(5));
+        } else if (resultSet.getString(6) != null) {
+            column.setSize(resultSet.getLong(6));
+        }
+        if (column.getColumnType().equals(ColumnTypeDto.TIMESTAMP) || column.getColumnType().equals(ColumnTypeDto.DATETIME)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultTimestampFormatId())
+                    .build());
+        } else if (column.getColumnType().equals(ColumnTypeDto.DATE)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultDateFormatId())
+                    .build());
+        } else if (column.getColumnType().equals(ColumnTypeDto.TIME)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultTimeFormatId())
+                    .build());
+        }
+        table.getColumns()
+                .add(column);
+        return table;
+    }
+
+    default ViewDto resultSetToTable(ResultSet resultSet, ViewDto view, QueryConfig queryConfig) throws SQLException {
+        final ViewColumnDto column = ViewColumnDto.builder()
+                .ordinalPosition(resultSet.getInt(1) - 1) /* start at zero */
+                .autoGenerated(resultSet.getString(2) != null && resultSet.getString(2).startsWith("nextval"))
+                .isNullAllowed(resultSet.getString(3).equals("YES"))
+                .columnType(ColumnTypeDto.valueOf(resultSet.getString(4).toUpperCase()))
+                .d(resultSet.getString(7) != null ? resultSet.getLong(7) : null)
+                .name(resultSet.getString(10))
+                .internalName(resultSet.getString(10))
+                .databaseId(view.getDatabase().getId())
+                .build();
+        /* fix boolean and set size for others */
+        if (resultSet.getString(8).equalsIgnoreCase("tinyint(1)")) {
+            column.setColumnType(ColumnTypeDto.BOOL);
+        } else if (resultSet.getString(5) != null) {
+            column.setSize(resultSet.getLong(5));
+        } else if (resultSet.getString(6) != null) {
+            column.setSize(resultSet.getLong(6));
+        }
+        if (column.getColumnType().equals(ColumnTypeDto.TIMESTAMP) || column.getColumnType().equals(ColumnTypeDto.DATETIME)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultTimestampFormatId())
+                    .build());
+        } else if (column.getColumnType().equals(ColumnTypeDto.DATE)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultDateFormatId())
+                    .build());
+        } else if (column.getColumnType().equals(ColumnTypeDto.TIME)) {
+            column.setDateFormat(ImageDateDto.builder()
+                    .id(queryConfig.getDefaultTimeFormatId())
+                    .build());
+        }
+        view.getColumns()
+                .add(column);
+        log.trace("parsed view {}.{} column: {}", view.getDatabase().getInternalName(), view.getInternalName(), column.getInternalName());
+        return view;
+    }
+
+    /**
+     * Parse the columns referenced by a SQL SELECT statement against the known tables/views of a database.
+     * @param database The database.
+     * @param query The SQL statement.
+     * @return The list of columns.
+     * @throws JSQLParserException The statement is not a SELECT statement, or a referenced table/view/column was not
+     *                             found in the database.
+     */
+    default List<ColumnDto> parseColumns(DatabaseDto database, String query) throws JSQLParserException {
+        final List<ColumnDto> columns = new ArrayList<>();
+        final CCJSqlParserManager parserRealSql = new CCJSqlParserManager();
+        final net.sf.jsqlparser.statement.Statement statement = parserRealSql.parse(new StringReader(query));
+        log.trace("parse columns from query: {}", query);
+        /* bi-directional mapping */
+        database.getTables()
+                .forEach(table -> table.getColumns()
+                        .forEach(column -> column.setTable(table)));
+        /* check */
+        if (!(statement instanceof Select selectStatement)) {
+            log.error("Query attempts to update the dataset, not a SELECT statement");
+            throw new JSQLParserException("Query attempts to update the dataset");
+        }
+        /* start parsing */
+        final PlainSelect ps = (PlainSelect) selectStatement.getSelectBody();
+        final List<SelectItem> clauses = ps.getSelectItems();
+        log.trace("columns referenced in the from-clause: {}", clauses);
+        /* Parse all tables */
+        final List<FromItem> fromItems = new ArrayList<>(fromItemToFromItems(ps.getFromItem()));
+        if (ps.getJoins() != null && !ps.getJoins().isEmpty()) {
+            log.trace("query contains join items: {}", ps.getJoins());
+            for (net.sf.jsqlparser.statement.select.Join j : ps.getJoins()) {
+                if (j.getRightItem() != null) {
+                    fromItems.add(j.getRightItem());
+                }
+            }
+        }
+        final List<ColumnDto> allColumns = Stream.of(database.getViews()
+                                .stream()
+                                .map(ViewDto::getColumns)
+                                .flatMap(List::stream)
+                                .map(this::viewColumnDtoToColumnDto),
+                        database.getTables()
+                                .stream()
+                                .map(TableDto::getColumns)
+                                .flatMap(List::stream))
+                .flatMap(i -> i)
+                .toList();
+        log.trace("columns referenced in the from-clause and join-clause(s): {}", clauses);
+        /* Checking if all columns exist */
+        for (SelectItem clause : clauses) {
+            final SelectExpressionItem item = (SelectExpressionItem) clause;
+            final Column column = (Column) item.getExpression();
+            final Optional<net.sf.jsqlparser.schema.Table> optional = fromItems.stream()
+                    .map(t -> (net.sf.jsqlparser.schema.Table) t)
+                    .filter(t -> {
+                        if (column.getTable() == null) {
+                            /* column does not reference a specific table, so there is only one table */
+                            final String tableName = ((net.sf.jsqlparser.schema.Table) fromItems.get(0)).getName().replace("`", "");
+                            return tableMatches(t, tableName);
+                        }
+                        final String tableName = column.getTable().getName().replace("`", "");
+                        return tableMatches(t, tableName);
+                    })
+                    .findFirst();
+            if (optional.isEmpty()) {
+                log.error("Failed to find table/view {} (with designator {})", column.getTable().getName(), column.getTable().getAlias());
+                throw new JSQLParserException("Failed to find table/view " + column.getTable().getName() + " (with alias " + column.getTable().getAlias() + ")");
+            }
+            final String columnName = column.getColumnName().replace("`", "");
+            final String tableOrView = optional.get().getName().replace("`", "");
+            final List<ColumnDto> filteredColumns = allColumns.stream()
+                    .filter(c -> (c.getAlias() != null && c.getAlias().equals(columnName)) || c.getInternalName().equals(columnName))
+                    .toList();
+            final Optional<ColumnDto> optionalColumn = filteredColumns.stream()
+                    .filter(c -> columnMatches(c, tableOrView))
+                    .findFirst();
+            if (optionalColumn.isEmpty()) {
+                log.error("Failed to find column with name {} of table/view {} in {}", columnName, tableOrView, filteredColumns.stream().map(c -> c.getTable().getInternalName() + "." + c.getInternalName()).toList());
+                throw new JSQLParserException("Failed to find column with name " + columnName + " of table/view " + tableOrView);
+            }
+            final ColumnDto resultColumn = optionalColumn.get();
+            if (item.getAlias() != null) {
+                resultColumn.setAlias(item.getAlias().getName().replace("`", ""));
+            }
+            resultColumn.setDatabaseId(database.getId());
+            resultColumn.setTable(resultColumn.getTable());
+            resultColumn.setTableId(resultColumn.getTable().getId());
+            log.trace("found column with internal name {} and alias {}", resultColumn.getInternalName(), resultColumn.getAlias());
+            columns.add(resultColumn);
+        }
+        return columns;
+    }
+
+    default boolean tableMatches(net.sf.jsqlparser.schema.Table table, String otherTableName) {
+        final String tableName = table.getName()
+                .trim()
+                .replace("`", "");
+        if (table.getAlias() == null) {
+            /* table does not have designator */
+            log.trace("table '{}' has no designator", tableName);
+            return tableName.equals(otherTableName);
+        }
+        /* has designator */
+        final String designator = table.getAlias()
+                .getName()
+                .trim()
+                .replace("`", "");
+        log.trace("table '{}' has designator {}", tableName, designator);
+        return designator.equals(otherTableName);
+    }
+
+    default boolean columnMatches(ColumnDto column, String tableOrView) {
+        if (column.getTable() != null && column.getTable().getInternalName().equals(tableOrView)) {
+            log.trace("table '{}' found in column table", tableOrView);
+            return true;
+        }
+        if (column.getViews() == null) {
+            log.trace("table/view '{}' not found among column views: empty list", tableOrView);
+            return false;
+        }
+        /* maybe matches one of the other views */
+        final boolean found = column.getViews()
+                .stream()
+                .anyMatch(v -> v.getInternalName().equals(tableOrView));
+        if (!found) {
+            log.trace("table/view '{}' not found among column views: {}", tableOrView, column.getViews().stream().map(ViewDto::getInternalName).toList());
+        }
+        return found;
+    }
+
+    default List<FromItem> fromItemToFromItems(FromItem data) throws JSQLParserException {
+        return fromItemToFromItems(data, 0);
+    }
+
+    default List<FromItem> fromItemToFromItems(FromItem data, Integer level) throws JSQLParserException {
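+        /* recursively flattens nested sub-joins into the list of base tables; level is used for tracing only */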
+        final List<FromItem> fromItems = new LinkedList<>();
+        if (data instanceof net.sf.jsqlparser.schema.Table table) {
+            fromItems.add(data);
+            log.trace("from-item {} is of type table: level ~> {}", table.getName(), level);
+            return fromItems;
+        }
+        if (data instanceof SubJoin subJoin) {
+            log.trace("from-item is of type sub-join: level ~> {}", level);
+            for (Join join : subJoin.getJoinList()) {
+                final List<FromItem> tmp = fromItemToFromItems(join.getRightItem(), level + 1);
+                if (tmp == null) {
+                    log.error("Failed to find right sub-join table: {}", join.getRightItem());
+                    throw new JSQLParserException("Failed to find right sub-join table");
+                }
+                fromItems.addAll(tmp);
+            }
+            final List<FromItem> tmp = fromItemToFromItems(subJoin.getLeft(), level + 1);
+            if (tmp == null) {
+                log.error("Failed to find left sub-join table: {}", subJoin.getLeft());
+                throw new JSQLParserException("Failed to find left sub-join table");
+            }
+            fromItems.addAll(tmp);
+            return fromItems;
+        }
+        log.warn("unknown from-item {}", data);
+        return null;
+    }
+
+    default QueryDto resultSetToQueryDto(@NotNull ResultSet data) throws SQLException, QueryNotFoundException {
+        /* note that next() is called outside this mapping function */
+        return QueryDto.builder()
+                .id(data.getLong(1))
+                .created(LocalDateTime.parse(data.getString(2), mariaDbFormatter)
+                        .atZone(ZoneId.of("UTC"))
+                        .toInstant())
+                .createdBy(UUID.fromString(data.getString(3)))
+                .query(data.getString(4))
+                .queryHash(data.getString(5))
+                .resultHash(data.getString(6))
+                .resultNumber(data.getLong(7))
+                .isPersisted(data.getBoolean(8))
+                .execution(LocalDateTime.parse(data.getString(9), mariaDbFormatter)
+                        .atZone(ZoneId.of("UTC"))
+                        .toInstant())
+                .build();
+    }
+
+    default List<TableHistoryDto> resultSetToTableHistory(ResultSet resultSet) throws SQLException {
+        /* columns */
+        final List<TableHistoryDto> history = new LinkedList<>();
+        while (resultSet.next()) {
+            history.add(TableHistoryDto.builder()
+                    .timestamp(LocalDateTime.parse(resultSet.getString(1), mariaDbFormatter)
+                            .atZone(ZoneId.of("UTC"))
+                            .toInstant())
+                    .event(resultSet.getString(2))
+                    .total(resultSet.getLong(3))
+                    .build());
+        }
+        log.trace("found {} history event(s)", history.size());
+        return history;
+    }
+
+    default TableDto resultSetToConstraint(ResultSet resultSet, TableDto table) throws SQLException {
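+        /* result-set layout: 2 = constraint type, 3 = constraint name, 4 = column, 5 = referenced table, 6 = referenced column, 7 = delete rule, 8 = update rule */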
+        final String type = resultSet.getString(2);
+        final String name = resultSet.getString(3);
+        final String columnName = resultSet.getString(4);
+        final String referencedTable = resultSet.getString(5);
+        final String referencedColumnName = resultSet.getString(6);
+        final ReferenceTypeDto deleteRule = resultSet.getString(7) != null ? ReferenceTypeDto.fromType(resultSet.getString(7)) : null;
+        final ReferenceTypeDto updateRule = resultSet.getString(8) != null ? ReferenceTypeDto.fromType(resultSet.getString(8)) : null;
+        final Optional<ColumnDto> optional = table.getColumns().stream()
+                .filter(c -> c.getInternalName().equals(columnName))
+                .findFirst();
+        if (optional.isEmpty()) {
+            log.error("Failed to find table column: {}", columnName);
+            throw new IllegalArgumentException("Failed to find table column");
+        }
+        final ColumnDto column = optional.get();
+        if (type.equals("FOREIGN KEY") || type.equals("UNIQUE")) {
+            final Optional<UniqueDto> optional2 = table.getConstraints().getUniques().stream().filter(u -> u.getName().equals(name)).findFirst();
+            if (optional2.isPresent()) {
+                optional2.get()
+                        .getColumns()
+                        .add(column);
+                return table;
+            }
+            if (type.equals("UNIQUE")) {
+                table.getConstraints()
+                        .getUniques()
+                        .add(UniqueDto.builder()
+                                .name(name)
+                                .columns(new LinkedList<>(List.of(column)))
+                                .build());
+                return table;
+            }
+            final Optional<ForeignKeyDto> optional1 = table.getConstraints()
+                    .getForeignKeys()
+                    .stream()
+                    .filter(fk -> fk.getName().equals(name))
+                    .findFirst();
+            final ForeignKeyReferenceDto foreignKeyReference = ForeignKeyReferenceDto.builder()
+                    .column(ColumnBriefDto.builder()
+                            .name(columnName)
+                            .internalName(columnName)
+                            .databaseId(table.getTdbid())
+                            .build())
+                    .referencedColumn(ColumnBriefDto.builder()
+                            .name(referencedColumnName)
+                            .internalName(referencedColumnName)
+                            .databaseId(table.getTdbid())
+                            .build())
+                    .build();
+            if (optional1.isPresent()) {
+                foreignKeyReference.setForeignKey(foreignKeyDtoToForeignKeyBriefDto(optional1.get()));
+                optional1.get()
+                        .getReferences()
+                        .add(foreignKeyReference);
+                log.debug("found foreign key: create part ({}) referencing table {} ({})", columnName, referencedTable, referencedColumnName);
+                return table;
+            }
+            final ForeignKeyDto foreignKey = ForeignKeyDto.builder()
+                    .name(name)
+                    .table(tableDtoToTableBriefDto(table))
+                    .referencedTable(TableBriefDto.builder()
+                            .name(referencedTable)
+                            .internalName(referencedTable)
+                            .databaseId(table.getTdbid())
+                            .build())
+                    .references(new LinkedList<>(List.of(foreignKeyReference)))
+                    .onDelete(deleteRule)
+                    .onUpdate(updateRule)
+                    .build();
+            foreignKey.getReferences()
+                    .forEach(ref -> ref.setForeignKey(foreignKeyDtoToForeignKeyBriefDto(foreignKey)));
+            table.getConstraints()
+                    .getForeignKeys()
+                    .add(foreignKey);
+            log.debug("create foreign key: add part ({}) referencing table {} ({})", columnName, referencedTable, referencedColumnName);
+            return table;
+        }
+        return table;
+    }
+
+    default TableDto schemaResultSetToTable(DatabaseDto database, ResultSet resultSet) throws SQLException,
+            TableNotFoundException {
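+        /* result-set layout: 1 = table name, 2 = versioning, 3 = row count, 4 = avg row length, 5 = data length, 6 = max data length, 7 = created, 10 = comment */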
+        if (!resultSet.next()) {
+            throw new TableNotFoundException("Failed to find table in the information schema");
+        }
+        final TableDto table = TableDto.builder()
+                .name(resultSet.getString(1))
+                .internalName(resultSet.getString(1))
+                .isVersioned(resultSet.getString(2).equals("SYSTEM VERSIONED"))
+                .numRows(resultSet.getLong(3))
+                .avgRowLength(resultSet.getLong(4))
+                .dataLength(resultSet.getLong(5))
+                .maxDataLength(resultSet.getLong(6))
+                .tdbid(database.getId())
+                .queueName("dbrepo")
+                .routingKey("dbrepo")
+                .description(resultSet.getString(10))
+                .columns(new LinkedList<>())
+                .identifiers(new LinkedList<>())
+                .creator(database.getOwner())
+                .createdBy(database.getOwner().getId())
+                .owner(database.getOwner())
+                .constraints(ConstraintsDto.builder()
+                        .foreignKeys(new LinkedList<>())
+                        .primaryKey(new LinkedHashSet<>())
+                        .uniques(new LinkedList<>())
+                        .checks(new LinkedHashSet<>())
+                        .build())
+                .isPublic(database.getIsPublic())
+                .build();
+        if (resultSet.getString(7) != null && !resultSet.getString(7).isEmpty()) {
+            table.setCreated(Timestamp.valueOf(resultSet.getString(7))
+                    .toInstant());
+        }
+        return table;
+    }
+
+    default Object dataColumnToObject(Object data, ColumnDto column) {
+        if (data == null) {
+            return null;
+        }
+        /* boolean encoding fix */
+        if (column.getColumnType().equals(ColumnTypeDto.TINYINT) && column.getSize() == 1) {
+            log.trace("column {} is of type tinyint with size {}: map to boolean", column.getInternalName(), column.getSize());
+            column.setColumnType(ColumnTypeDto.BOOL);
+        }
+        switch (column.getColumnType()) {
+            case DATE -> {
+                if (column.getDateFormat() == null) {
+                    log.error("Missing date format for column {}", column.getId());
+                    throw new IllegalArgumentException("Missing date format");
+                }
+                log.trace("mapping {} to date with format '{}'", data, column.getDateFormat());
+                final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
+                        .parseCaseInsensitive() /* case insensitive to parse JAN and FEB */
+                        .appendPattern(column.getDateFormat().getUnixFormat())
+                        .toFormatter(Locale.ENGLISH);
+                final LocalDate date = LocalDate.parse(String.valueOf(data), formatter);
+                return date.atStartOfDay(ZoneId.of("UTC"))
+                        .toInstant();
+            }
+            case TIMESTAMP, DATETIME -> {
+                if (column.getDateFormat() == null) {
+                    log.error("Missing date format for column {}", column.getId());
+                    throw new IllegalArgumentException("Missing date format");
+                }
+                log.trace("mapping {} to timestamp with format '{}'", data, column.getDateFormat());
+                return Timestamp.valueOf(data.toString())
+                        .toInstant();
+            }
+            case BINARY, VARBINARY, BIT -> {
+                log.trace("mapping {} -> binary", data);
+                return Long.parseLong(String.valueOf(data), 2);
+            }
+            case TEXT, CHAR, VARCHAR, TINYTEXT, MEDIUMTEXT, LONGTEXT, ENUM, SET -> {
+                log.trace("mapping {} -> string", data);
+                return String.valueOf(data);
+            }
+            case BIGINT -> {
+                log.trace("mapping {} -> biginteger", data);
+                return new BigInteger(String.valueOf(data));
+            }
+            case INT, SMALLINT, MEDIUMINT, TINYINT -> {
+                log.trace("mapping {} -> integer", data);
+                return Integer.parseInt(String.valueOf(data));
+            }
+            case DECIMAL, FLOAT, DOUBLE -> {
+                log.trace("mapping {} -> double", data);
+                return Double.valueOf(String.valueOf(data));
+            }
+            case BOOL -> {
+                log.trace("mapping {} -> boolean", data);
+                return Boolean.valueOf(String.valueOf(data));
+            }
+            case TIME -> {
+                log.trace("mapping {} -> time", data);
+                return String.valueOf(data);
+            }
+            case YEAR -> {
+                final String date = String.valueOf(data);
+                log.trace("mapping {} -> year", date);
+                return Short.valueOf(date.substring(0, date.indexOf('-')));
+            }
+        }
+        log.warn("column type {} is not known", column.getColumnType());
+        throw new IllegalArgumentException("Column type not known");
+    }
+
+    default QueryResultDto resultListToQueryResultDto(List<ColumnDto> columns, ResultSet result) throws SQLException {
+        log.trace("mapping result list to query result, columns.size={}", columns.size());
+        final List<Map<String, Object>> resultList = new LinkedList<>();
+        while (result.next()) {
+            /* map the result set to the columns through the stored metadata in the metadata database */
+            int[] idx = new int[]{1};
+            final Map<String, Object> map = new HashMap<>();
+            for (final ColumnDto column : columns) {
+                final String columnOrAlias;
+                if (column.getAlias() != null) {
+                    log.debug("column {} has alias {}", column.getInternalName(), column.getAlias());
+                    columnOrAlias = column.getAlias();
+                } else {
+                    columnOrAlias = column.getInternalName();
+                }
+                if (List.of(ColumnTypeDto.BLOB, ColumnTypeDto.TINYBLOB, ColumnTypeDto.MEDIUMBLOB, ColumnTypeDto.LONGBLOB).contains(column.getColumnType())) {
+                    log.trace("column {} is of type {}", columnOrAlias, column.getColumnType().getType().toLowerCase());
+                    final Blob blob = result.getBlob(idx[0]++);
+                    final String value = blob == null ? null : Hex.encodeHexString(blob.getBytes(1, (int) blob.length())).toUpperCase();
+                    map.put(columnOrAlias, value);
+                    continue;
+                }
+                final Object object = dataColumnToObject(result.getObject(idx[0]++), column);
+                if (object == null) {
+                    log.warn("result set for column {} is empty (=null)", column.getInternalName());
+                }
+                map.put(columnOrAlias, object);
+            }
+            resultList.add(map);
+        }
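+        /* a single-element array serves as a mutable counter inside the lambda below */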
+        final int[] idx = new int[]{0};
+        final List<Map<String, Integer>> headers = columns.stream()
+                .map(c -> Map.of(c.getAlias() != null ? c.getAlias() : c.getInternalName(), idx[0]++))
+                .toList();
+        log.trace("created ordered header list: {}", headers);
+        return QueryResultDto.builder()
+                .result(resultList)
+                .headers(headers)
+                .build();
+    }
+
     default void prepareStatementWithColumnTypeObject(PreparedStatement ps, ColumnTypeDto columnType, int idx, Object value) throws SQLException {
         switch (columnType) {
             case BLOB, TINYBLOB, MEDIUMBLOB, LONGBLOB:
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MariaDbMapper.java b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MariaDbMapper.java
index afb0701455515758c8bec86eac138bdccb151a20..8b43248271a686a3c23bceff02fc9d61ee801f9e 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MariaDbMapper.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MariaDbMapper.java
@@ -1,41 +1,16 @@
 package at.tuwien.mapper;
 
-import at.tuwien.api.container.image.ImageDateDto;
-import at.tuwien.api.database.DatabaseDto;
-import at.tuwien.api.database.ViewColumnDto;
-import at.tuwien.api.database.ViewDto;
 import at.tuwien.api.database.query.ImportCsvDto;
-import at.tuwien.api.database.query.QueryDto;
-import at.tuwien.api.database.query.QueryResultDto;
 import at.tuwien.api.database.table.*;
 import at.tuwien.api.database.table.columns.*;
-import at.tuwien.api.database.table.constraints.ConstraintsDto;
-import at.tuwien.api.database.table.constraints.foreign.ForeignKeyBriefDto;
-import at.tuwien.api.database.table.constraints.foreign.ForeignKeyDto;
-import at.tuwien.api.database.table.constraints.foreign.ForeignKeyReferenceDto;
-import at.tuwien.api.database.table.constraints.foreign.ReferenceTypeDto;
-import at.tuwien.api.database.table.constraints.primary.PrimaryKeyDto;
-import at.tuwien.api.database.table.constraints.unique.UniqueDto;
 import at.tuwien.api.database.table.internal.PrivilegedTableDto;
-import at.tuwien.config.QueryConfig;
 import at.tuwien.exception.*;
 import at.tuwien.utils.MariaDbUtil;
-import com.github.dockerjava.zerodep.shaded.org.apache.commons.codec.binary.Hex;
-import com.google.common.hash.Hashing;
-import net.sf.jsqlparser.JSQLParserException;
-import net.sf.jsqlparser.parser.CCJSqlParserManager;
-import net.sf.jsqlparser.schema.Column;
-import net.sf.jsqlparser.statement.select.*;
-import org.jetbrains.annotations.NotNull;
 import org.mapstruct.Mapper;
-import org.mapstruct.Mapping;
-import org.mapstruct.Mappings;
 import org.mapstruct.Named;
 
-import javax.swing.table.TableColumn;
 import java.io.*;
 import java.math.BigInteger;
-import java.nio.charset.StandardCharsets;
 import java.sql.*;
 import java.sql.Date;
 import java.text.Normalizer;
@@ -45,9 +20,8 @@ import java.time.format.DateTimeFormatterBuilder;
 import java.util.*;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
-@Mapper(componentModel = "spring", uses = {MetadataMapper.class})
+@Mapper(componentModel = "spring", uses = {MetadataMapper.class, DataMapper.class})
 public interface MariaDbMapper {
 
     org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(MariaDbMapper.class);
@@ -108,49 +82,6 @@ public interface MariaDbMapper {
         return statement.toString();
     }
 
-    default QueryResultDto resultListToQueryResultDto(List<ColumnDto> columns, ResultSet result) throws SQLException {
-        log.trace("mapping result list to query result, columns.size={}", columns.size());
-        final List<Map<String, Object>> resultList = new LinkedList<>();
-        while (result.next()) {
-            /* map the result set to the columns through the stored metadata in the metadata database */
-            int[] idx = new int[]{1};
-            final Map<String, Object> map = new HashMap<>();
-            for (final ColumnDto column : columns) {
-                final String columnOrAlias;
-                if (column.getAlias() != null) {
-                    log.debug("column {} has alias {}", column.getInternalName(), column.getAlias());
-                    columnOrAlias = column.getAlias();
-                } else {
-                    columnOrAlias = column.getInternalName();
-                }
-                if (List.of(ColumnTypeDto.BLOB, ColumnTypeDto.TINYBLOB, ColumnTypeDto.MEDIUMBLOB, ColumnTypeDto.LONGBLOB).contains(column.getColumnType())) {
-                    log.trace("column {} is of type {}", columnOrAlias, column.getColumnType().getType().toLowerCase());
-                    final Blob blob = result.getBlob(idx[0]++);
-                    final String value = blob == null ? null : Hex.encodeHexString(blob.getBytes(1, (int) blob.length())).toUpperCase();
-                    map.put(columnOrAlias, value);
-                    continue;
-                }
-                final Object object = dataColumnToObject(result.getObject(idx[0]++), column);
-                if (object == null) {
-                    log.warn("result set for column {} is empty (=null)", column.getInternalName());
-                }
-                map.put(columnOrAlias, object);
-            }
-            resultList.add(map);
-        }
-        final int[] idx = new int[]{0};
-        final List<Map<String, Integer>> headers = columns.stream()
-                .map(c -> (Map<String, Integer>) new LinkedHashMap<String, Integer>() {{
-                    put(c.getAlias() != null ? c.getAlias() : c.getInternalName(), idx[0]++);
-                }})
-                .toList();
-        log.trace("created ordered header list: {}", headers);
-        return QueryResultDto.builder()
-                .result(resultList)
-                .headers(headers)
-                .build();
-    }
-
     default String databaseTablesSelectRawQuery() {
         final String statement = "SELECT DISTINCT t.`TABLE_NAME` FROM information_schema.TABLES t WHERE t.`TABLE_SCHEMA` = ? AND t.`TABLE_TYPE` = 'SYSTEM VERSIONED' AND t.`TABLE_NAME` != 'qs_queries' ORDER BY t.`TABLE_NAME` ASC";
         log.trace("mapped select tables statement: {}", statement);
@@ -182,7 +113,7 @@ public interface MariaDbMapper {
     }
 
     default String databaseTableConstraintsSelectRawQuery() {
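+        /* the referential-constraints join is additionally constrained by schema and table so
+           equally named constraints from other schemas/tables are not matched */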
-        final String statement = "SELECT k.`ORDINAL_POSITION`, c.`CONSTRAINT_TYPE`, k.`CONSTRAINT_NAME`, k.`COLUMN_NAME`, k.`REFERENCED_TABLE_NAME`, k.`REFERENCED_COLUMN_NAME`, r.`DELETE_RULE`, r.`UPDATE_RULE` FROM information_schema.TABLE_CONSTRAINTS c JOIN information_schema.KEY_COLUMN_USAGE k ON c.`TABLE_NAME` = k.`TABLE_NAME` AND c.`CONSTRAINT_NAME` = k.`CONSTRAINT_NAME` LEFT JOIN information_schema.REFERENTIAL_CONSTRAINTS r ON r.`CONSTRAINT_NAME` = k.`CONSTRAINT_NAME` WHERE LOWER(k.`COLUMN_NAME`) != 'row_end' AND c.`TABLE_SCHEMA` = ? AND c.`TABLE_NAME` = ? GROUP BY k.`ORDINAL_POSITION`, k.`CONSTRAINT_NAME` ORDER BY k.`ORDINAL_POSITION` ASC;";
+        final String statement = "SELECT k.`ORDINAL_POSITION`, c.`CONSTRAINT_TYPE`, k.`CONSTRAINT_NAME`, k.`COLUMN_NAME`, k.`REFERENCED_TABLE_NAME`, k.`REFERENCED_COLUMN_NAME`, r.`DELETE_RULE`, r.`UPDATE_RULE` FROM information_schema.TABLE_CONSTRAINTS c JOIN information_schema.KEY_COLUMN_USAGE k ON c.`TABLE_NAME` = k.`TABLE_NAME` AND c.`CONSTRAINT_NAME` = k.`CONSTRAINT_NAME` LEFT JOIN information_schema.REFERENTIAL_CONSTRAINTS r ON r.`CONSTRAINT_NAME` = k.`CONSTRAINT_NAME` AND r.`CONSTRAINT_SCHEMA` = c.`TABLE_SCHEMA` AND r.`TABLE_NAME` = c.`TABLE_NAME` WHERE LOWER(k.`COLUMN_NAME`) != 'row_end' AND c.`TABLE_SCHEMA` = ? AND c.`TABLE_NAME` = ? ORDER BY k.`ORDINAL_POSITION` ASC;";
         log.trace("mapped select table constraints statement: {}", statement);
         return statement;
     }
@@ -411,23 +342,6 @@ public interface MariaDbMapper {
         return data.getLong(1);
     }
 
-    default TableStatisticDto resultSetToTableStatistic(ResultSet data) throws SQLException {
-        final TableStatisticDto statistic = TableStatisticDto.builder()
-                .columns(new LinkedHashMap<>())
-                .build();
-        while (data.next()) {
-            final ColumnStatisticDto columnStatistic = ColumnStatisticDto.builder()
-                    .min(data.getBigDecimal(2))
-                    .max(data.getBigDecimal(3))
-                    .median(data.getBigDecimal(4))
-                    .mean(data.getBigDecimal(5))
-                    .stdDev(data.getBigDecimal(6))
-                    .build();
-            statistic.getColumns().put(data.getString(1), columnStatistic);
-        }
-        return statistic;
-    }
-
     /**
      * Selects the dataset page from a table/view.
      *
@@ -490,48 +404,6 @@ public interface MariaDbMapper {
         return "DROP TABLE `" + tableName + "`;";
     }
 
-    default String tupleToRawInsertQuery(PrivilegedTableDto table, TupleDto data) throws TableMalformedException {
-        log.trace("mapping table data to insert query, table={}, data={}", table, data);
-        if (table.getColumns().isEmpty()) {
-            throw new TableMalformedException("Columns are not known: empty");
-        }
-        /* parameterized query for prepared statement */
-        final StringBuilder statement = new StringBuilder("INSERT INTO `")
-                .append(table.getInternalName())
-                .append("` (")
-                .append(data.getData()
-                        .keySet()
-                        .stream()
-                        .map(o -> "`" + o + "`")
-                        .collect(Collectors.joining(",")))
-                .append(") VALUES (")
-                .append(data.getData()
-                        .keySet()
-                        .stream()
-                        .map(o -> "?")
-                        .collect(Collectors.joining(",")));
-        statement.append(");");
-        for (int i = 0; i < table.getColumns().size(); i++) {
-            final ColumnDto column = table.getColumns()
-                    .get(i);
-            if (column.getAutoGenerated()) {
-                log.trace("column is auto-generated, skip.");
-                continue;
-            }
-            final Optional<Map.Entry<String, Object>> tuple = data.getData()
-                    .entrySet()
-                    .stream()
-                    .filter(d -> d.getKey().equals(column.getInternalName()))
-                    .findFirst();
-            if (tuple.isEmpty()) {
-                log.error("Failed to map column name {}, known names: {}", column.getInternalName(), data.getData().keySet());
-                throw new TableMalformedException("Failed to map column names: not all columns are present in the tuple!");
-            }
-        }
-        log.trace("mapped tuple insert query: {}", statement);
-        return statement.toString();
-    }
-
     default String tableOrViewToRawExportQuery(String databaseName, String tableOrView, List<ColumnDto> columns,
                                                Instant timestamp, String filePath) {
         final StringBuilder statement = new StringBuilder("SELECT ");
@@ -583,280 +455,6 @@ public interface MariaDbMapper {
         return statement.toString();
     }
 
-    /**
-     * Map the inspected schema to either an existing view/table and append e.g. column or (if not existing) create a new view/table.
-     * @param database The database.
-     * @param resultSet The inspected schema.
-     * @return The database containing the updated view/table.
-     * @throws SQLException
-     */
-    default ViewDto schemaResultSetToView(DatabaseDto database, ResultSet resultSet) throws SQLException {
-        return ViewDto.builder()
-                .name(resultSet.getString(1))
-                .internalName(resultSet.getString(1))
-                .vdbid(database.getId())
-                .database(database)
-                .isInitialView(false)
-                .isPublic(database.getIsPublic())
-                .query(resultSet.getString(9))
-                .queryHash(Hashing.sha256()
-                        .hashString(resultSet.getString(9), StandardCharsets.UTF_8)
-                        .toString())
-                .columns(new LinkedList<>())
-                .identifiers(new LinkedList<>())
-                .creator(database.getOwner())
-                .createdBy(database.getOwner().getId())
-                .build();
-    }
-
-    ViewColumnDto columnDtoToViewColumnDto(ColumnDto data);
-
-    ColumnDto viewColumnDtoToColumnDto(ViewColumnDto data);
-
-    default TableDto schemaResultSetToTable(DatabaseDto database, ResultSet resultSet) throws SQLException,
-            TableNotFoundException {
-        if (!resultSet.next()) {
-            throw new TableNotFoundException("Failed to find table in the information schema");
-        }
-        final TableDto table = TableDto.builder()
-                .name(resultSet.getString(1))
-                .internalName(resultSet.getString(1))
-                .isVersioned(resultSet.getString(2).equals("SYSTEM VERSIONED"))
-                .numRows(resultSet.getLong(3))
-                .avgRowLength(resultSet.getLong(4))
-                .dataLength(resultSet.getLong(5))
-                .maxDataLength(resultSet.getLong(6))
-                .tdbid(database.getId())
-                .queueName("dbrepo")
-                .routingKey("dbrepo")
-                .description(resultSet.getString(10))
-                .columns(new LinkedList<>())
-                .identifiers(new LinkedList<>())
-                .creator(database.getOwner())
-                .createdBy(database.getOwner().getId())
-                .owner(database.getOwner())
-                .constraints(ConstraintsDto.builder()
-                        .foreignKeys(new LinkedList<>())
-                        .primaryKey(new LinkedHashSet<>())
-                        .uniques(new LinkedList<>())
-                        .checks(new LinkedHashSet<>())
-                        .build())
-                .isPublic(database.getIsPublic())
-                .build();
-        if (resultSet.getString(7) != null && !resultSet.getString(7).isEmpty()) {
-            table.setCreated(Timestamp.valueOf(resultSet.getString(7))
-                    .toInstant());
-        }
-        return table;
-    }
-
-    ForeignKeyBriefDto foreignKeyDtoToForeignKeyBriefDto(ForeignKeyDto data);
-
-    default TableDto resultSetToConstraint(ResultSet resultSet, TableDto table) throws SQLException {
-        final String type = resultSet.getString(2);
-        final String name = resultSet.getString(3);
-        final String columnName = resultSet.getString(4);
-        final String referencedTable = resultSet.getString(5);
-        final String referencedColumnName = resultSet.getString(6);
-        final ReferenceTypeDto deleteRule = resultSet.getString(7) != null ? ReferenceTypeDto.fromType(resultSet.getString(7)) : null;
-        final ReferenceTypeDto updateRule = resultSet.getString(8) != null ? ReferenceTypeDto.fromType(resultSet.getString(8)) : null;
-        final Optional<ColumnDto> optional = table.getColumns().stream()
-                .filter(c -> c.getInternalName().equals(columnName))
-                .findFirst();
-        if (optional.isEmpty()) {
-            log.error("Failed to find table column: {}", columnName);
-            throw new IllegalArgumentException("Failed to find table column");
-        }
-        final ColumnDto column = optional.get();
-        if (type.equals("FOREIGN KEY") || type.equals("UNIQUE")) {
-            final Optional<UniqueDto> optional2 = table.getConstraints().getUniques().stream().filter(u -> u.getName().equals(name)).findFirst();
-            if (optional2.isPresent()) {
-                optional2.get()
-                        .getColumns()
-                        .add(column);
-                return table;
-            }
-            if (type.equals("UNIQUE")) {
-                table.getConstraints()
-                        .getUniques()
-                        .add(UniqueDto.builder()
-                                .name(name)
-                                .columns(new LinkedList<>(List.of(column)))
-                                .build());
-                return table;
-            }
-            final Optional<ForeignKeyDto> optional1 = table.getConstraints()
-                    .getForeignKeys()
-                    .stream()
-                    .filter(fk -> fk.getName().equals(name))
-                    .findFirst();
-            final ForeignKeyReferenceDto foreignKeyReference = ForeignKeyReferenceDto.builder()
-                    .column(ColumnBriefDto.builder()
-                            .name(columnName)
-                            .internalName(columnName)
-                            .databaseId(table.getTdbid())
-                            .build())
-                    .referencedColumn(ColumnBriefDto.builder()
-                            .name(referencedColumnName)
-                            .internalName(referencedColumnName)
-                            .databaseId(table.getTdbid())
-                            .build())
-                    .build();
-            if (optional1.isPresent()) {
-                foreignKeyReference.setForeignKey(foreignKeyDtoToForeignKeyBriefDto(optional1.get()));
-                optional1.get()
-                        .getReferences()
-                        .add(foreignKeyReference);
-                log.debug("found foreign key: create part ({}) referencing table {} ({})", columnName, referencedTable, referencedColumnName);
-                return table;
-            }
-            final ForeignKeyDto foreignKey = ForeignKeyDto.builder()
-                    .name(name)
-                    .table(tableDtoToTableBriefDto(table))
-                    .referencedTable(TableBriefDto.builder()
-                            .name(referencedTable)
-                            .internalName(referencedTable)
-                            .databaseId(table.getTdbid())
-                            .build())
-                    .references(new LinkedList<>(List.of(foreignKeyReference)))
-                    .onDelete(deleteRule)
-                    .onUpdate(updateRule)
-                    .build();
-            foreignKey.getReferences()
-                    .forEach(ref -> ref.setForeignKey(foreignKeyDtoToForeignKeyBriefDto(foreignKey)));
-            table.getConstraints()
-                    .getForeignKeys()
-                    .add(foreignKey);
-            log.debug("create foreign key: add part ({}) referencing table {} ({})", columnName, referencedTable, referencedColumnName);
-            return table;
-        }
-        return table;
-    }
-
-    @Mappings({
-            @Mapping(target = "databaseId", source = "tdbid")
-    })
-    TableBriefDto tableDtoToTableBriefDto(TableDto data);
-
-    ColumnBriefDto columnDtoToColumnBriefDto(ColumnDto data);
-
-    default TableDto resultSetToTable(ResultSet resultSet, TableDto table, QueryConfig queryConfig) throws SQLException {
-        final ColumnDto column = ColumnDto.builder()
-                .ordinalPosition(resultSet.getInt(1) - 1) /* start at zero */
-                .autoGenerated(resultSet.getString(2) != null && resultSet.getString(2).startsWith("nextval"))
-                .isNullAllowed(resultSet.getString(3).equals("YES"))
-                .columnType(ColumnTypeDto.valueOf(resultSet.getString(4).toUpperCase()))
-                .d(resultSet.getString(7) != null ? resultSet.getLong(7) : null)
-                .name(resultSet.getString(10))
-                .internalName(resultSet.getString(10))
-                .table(table)
-                .tableId(table.getId())
-                .databaseId(table.getTdbid())
-                .description(resultSet.getString(11))
-                .build();
-        if (column.getColumnType().equals(ColumnTypeDto.ENUM)) {
-            column.setEnums(Arrays.stream(resultSet.getString(8)
-                            .substring(0, resultSet.getString(8).length() - 1)
-                            .replace("enum(", "")
-                            .split(","))
-                    .map(value -> value.replace("'", ""))
-                    .toList());
-        }
-        if (column.getColumnType().equals(ColumnTypeDto.SET)) {
-            column.setSets(Arrays.stream(resultSet.getString(8)
-                            .substring(0, resultSet.getString(8).length() - 1)
-                            .replace("set(", "")
-                            .split(","))
-                    .map(value -> value.replace("'", ""))
-                    .toList());
-        }
-        /* constraints */
-        if (resultSet.getString(9) != null && resultSet.getString(9).equals("PRI")) {
-            table.getConstraints().getPrimaryKey().add(PrimaryKeyDto.builder()
-                    .table(tableDtoToTableBriefDto(table))
-                    .column(columnDtoToColumnBriefDto(column))
-                    .build());
-        }
-        /* fix boolean and set size for others */
-        if (resultSet.getString(8).equalsIgnoreCase("tinyint(1)")) {
-            column.setColumnType(ColumnTypeDto.BOOL);
-        } else if (resultSet.getString(5) != null) {
-            column.setSize(resultSet.getLong(5));
-        } else if (resultSet.getString(6) != null) {
-            column.setSize(resultSet.getLong(6));
-        }
-        if (column.getColumnType().equals(ColumnTypeDto.TIMESTAMP) || column.getColumnType().equals(ColumnTypeDto.DATETIME)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultTimestampFormatId())
-                    .build());
-        } else if (column.getColumnType().equals(ColumnTypeDto.DATE)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultDateFormatId())
-                    .build());
-        } else if (column.getColumnType().equals(ColumnTypeDto.TIME)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultTimeFormatId())
-                    .build());
-        }
-        table.getColumns()
-                .add(column);
-        return table;
-    }
-
-    default ViewDto resultSetToTable(ResultSet resultSet, ViewDto view, QueryConfig queryConfig) throws SQLException {
-        final ViewColumnDto column = ViewColumnDto.builder()
-                .ordinalPosition(resultSet.getInt(1) - 1) /* start at zero */
-                .autoGenerated(resultSet.getString(2) != null && resultSet.getString(2).startsWith("nextval"))
-                .isNullAllowed(resultSet.getString(3).equals("YES"))
-                .columnType(ColumnTypeDto.valueOf(resultSet.getString(4).toUpperCase()))
-                .d(resultSet.getString(7) != null ? resultSet.getLong(7) : null)
-                .name(resultSet.getString(10))
-                .internalName(resultSet.getString(10))
-                .databaseId(view.getDatabase().getId())
-                .build();
-        /* fix boolean and set size for others */
-        if (resultSet.getString(8).equalsIgnoreCase("tinyint(1)")) {
-            column.setColumnType(ColumnTypeDto.BOOL);
-        } else if (resultSet.getString(5) != null) {
-            column.setSize(resultSet.getLong(5));
-        } else if (resultSet.getString(6) != null) {
-            column.setSize(resultSet.getLong(6));
-        }
-        if (column.getColumnType().equals(ColumnTypeDto.TIMESTAMP) || column.getColumnType().equals(ColumnTypeDto.DATETIME)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultTimestampFormatId())
-                    .build());
-        } else if (column.getColumnType().equals(ColumnTypeDto.DATE)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultDateFormatId())
-                    .build());
-        } else if (column.getColumnType().equals(ColumnTypeDto.TIME)) {
-            column.setDateFormat(ImageDateDto.builder()
-                    .id(queryConfig.getDefaultTimeFormatId())
-                    .build());
-        }
-        view.getColumns()
-                .add(column);
-        log.trace("parsed view {}.{} column: {}", view.getDatabase().getInternalName(), view.getInternalName(), column.getInternalName());
-        return view;
-    }
-
-    default List<TableHistoryDto> resultSetToTableHistory(ResultSet resultSet) throws SQLException {
-        /* columns */
-        final List<TableHistoryDto> history = new LinkedList<>();
-        while (resultSet.next()) {
-            history.add(TableHistoryDto.builder()
-                    .timestamp(LocalDateTime.parse(resultSet.getString(1), mariaDbFormatter)
-                            .atZone(ZoneId.of("UTC"))
-                            .toInstant())
-                    .event(resultSet.getString(2))
-                    .total(resultSet.getLong(3))
-                    .build());
-        }
-        log.trace("found {} history event(s)", history.size());
-        return history;
-    }
-
     default String datasetToRawInsertQuery(String databaseName, PrivilegedTableDto table, ImportCsvDto data) {
         final StringBuilder statement = new StringBuilder("LOAD DATA INFILE '")
                 .append(data.getLocation())
@@ -1292,258 +890,6 @@ public interface MariaDbMapper {
         }
     }
 
-    default Object dataColumnToObject(Object data, ColumnDto column) {
-        if (data == null) {
-            return null;
-        }
-        /* boolean encoding fix */
-        if (column.getColumnType().equals(ColumnTypeDto.TINYINT) && column.getSize() == 1) {
-            log.trace("column {} is of type tinyint with size {}: map to boolean", column.getInternalName(), column.getSize());
-            column.setColumnType(ColumnTypeDto.BOOL);
-        }
-        switch (column.getColumnType()) {
-            case DATE -> {
-                if (column.getDateFormat() == null) {
-                    log.error("Missing date format for column {}", column.getId());
-                    throw new IllegalArgumentException("Missing date format");
-                }
-                log.trace("mapping {} to date with format '{}'", data, column.getDateFormat());
-                final DateTimeFormatter formatter = new DateTimeFormatterBuilder()
-                        .parseCaseInsensitive() /* case insensitive to parse JAN and FEB */
-                        .appendPattern(column.getDateFormat().getUnixFormat())
-                        .toFormatter(Locale.ENGLISH);
-                final LocalDate date = LocalDate.parse(String.valueOf(data), formatter);
-                return date.atStartOfDay(ZoneId.of("UTC"))
-                        .toInstant();
-            }
-            case TIMESTAMP, DATETIME -> {
-                if (column.getDateFormat() == null) {
-                    log.error("Missing date format for column {}", column.getId());
-                    throw new IllegalArgumentException("Missing date format");
-                }
-                log.trace("mapping {} to timestamp with format '{}'", data, column.getDateFormat());
-                return Timestamp.valueOf(data.toString())
-                        .toInstant();
-            }
-            case BINARY, VARBINARY, BIT -> {
-                log.trace("mapping {} -> binary", data);
-                return Long.parseLong(String.valueOf(data), 2);
-            }
-            case TEXT, CHAR, VARCHAR, TINYTEXT, MEDIUMTEXT, LONGTEXT, ENUM, SET -> {
-                log.trace("mapping {} -> string", data);
-                return String.valueOf(data);
-            }
-            case BIGINT -> {
-                log.trace("mapping {} -> biginteger", data);
-                return new BigInteger(String.valueOf(data));
-            }
-            case INT, SMALLINT, MEDIUMINT, TINYINT -> {
-                log.trace("mapping {} -> integer", data);
-                return Integer.parseInt(String.valueOf(data));
-            }
-            case DECIMAL, FLOAT, DOUBLE -> {
-                log.trace("mapping {} -> double", data);
-                return Double.valueOf(String.valueOf(data));
-            }
-            case BOOL -> {
-                log.trace("mapping {} -> boolean", data);
-                return Boolean.valueOf(String.valueOf(data));
-            }
-            case TIME -> {
-                log.trace("mapping {} -> time", data);
-                return String.valueOf(data);
-            }
-            case YEAR -> {
-                final String date = String.valueOf(data);
-                log.trace("mapping {} -> year", date);
-                return Short.valueOf(date.substring(0, date.indexOf('-')));
-            }
-        }
-        log.warn("column type {} is not known", column.getColumnType());
-        throw new IllegalArgumentException("Column type not known");
-    }
-
-    /**
-     * Parse columns from a SQL statement of a known database.
-     * @param database The database.
-     * @param query The SQL statement.
-     * @return The list of columns.
-     * @throws JSQLParserException The table/view or column was not found in the database.
-     */
-    default List<ColumnDto> parseColumns(DatabaseDto database, String query) throws JSQLParserException {
-        final List<ColumnDto> columns = new ArrayList<>();
-        final CCJSqlParserManager parserRealSql = new CCJSqlParserManager();
-        final net.sf.jsqlparser.statement.Statement statement = parserRealSql.parse(new StringReader(query));
-        log.trace("parse columns from query: {}", query);
-        /* bi-directional mapping */
-        database.getTables()
-                .forEach(table -> table.getColumns()
-                        .forEach(column -> column.setTable(table)));
-        /* check */
-        if (!(statement instanceof Select selectStatement)) {
-            log.error("Query attempts to update the dataset, not a SELECT statement");
-            throw new JSQLParserException("Query attempts to update the dataset");
-        }
-        /* start parsing */
-        final PlainSelect ps = (PlainSelect) selectStatement.getSelectBody();
-        final List<SelectItem> clauses = ps.getSelectItems();
-        log.trace("columns referenced in the from-clause: {}", clauses);
-        /* Parse all tables */
-        final List<FromItem> fromItems = new ArrayList<>(fromItemToFromItems(ps.getFromItem()));
-        if (ps.getJoins() != null && !ps.getJoins().isEmpty()) {
-            log.trace("query contains join items: {}", ps.getJoins());
-            for (net.sf.jsqlparser.statement.select.Join j : ps.getJoins()) {
-                if (j.getRightItem() != null) {
-                    fromItems.add(j.getRightItem());
-                }
-            }
-        }
-        final List<ColumnDto> allColumns = Stream.of(database.getViews()
-                                .stream()
-                                .map(ViewDto::getColumns)
-                                .flatMap(List::stream)
-                                .map(this::viewColumnDtoToColumnDto),
-                        database.getTables()
-                                .stream()
-                                .map(TableDto::getColumns)
-                                .flatMap(List::stream))
-                .flatMap(i -> i)
-                .toList();
-        log.trace("columns referenced in the from-clause and join-clause(s): {}", clauses);
-        /* Checking if all columns exist */
-        for (SelectItem clause : clauses) {
-            final SelectExpressionItem item = (SelectExpressionItem) clause;
-            final Column column = (Column) item.getExpression();
-            final Optional<net.sf.jsqlparser.schema.Table> optional = fromItems.stream()
-                    .map(t -> (net.sf.jsqlparser.schema.Table) t)
-                    .filter(t -> {
-                        if (column.getTable() == null) {
-                            /* column does not reference a specific table, so there is only one table */
-                            final String tableName = ((net.sf.jsqlparser.schema.Table) fromItems.get(0)).getName().replace("`", "");
-                            return tableMatches(t, tableName);
-                        }
-                        final String tableName = column.getTable().getName().replace("`", "");
-                        return tableMatches(t, tableName);
-                    })
-                    .findFirst();
-            if (optional.isEmpty()) {
-                log.error("Failed to find table/view {} (with designator {})", column.getTable().getName(), column.getTable().getAlias());
-                throw new JSQLParserException("Failed to find table/view " + column.getTable().getName() + " (with alias " + column.getTable().getAlias() + ")");
-            }
-            final String columnName = column.getColumnName().replace("`", "");
-            final String tableOrView = optional.get().getName().replace("`", "");
-            final List<ColumnDto> filteredColumns = allColumns.stream()
-                    .filter(c -> (c.getAlias() != null && c.getAlias().equals(columnName)) || c.getInternalName().equals(columnName))
-                    .toList();
-            final Optional<ColumnDto> optionalColumn = filteredColumns.stream()
-                    .filter(c -> columnMatches(c, tableOrView))
-                    .findFirst();
-            if (optionalColumn.isEmpty()) {
-                log.error("Failed to find column with name {} of table/view {} in {}", columnName, tableOrView, filteredColumns.stream().map(c -> c.getTable().getInternalName() + "." + c.getInternalName()).toList());
-                throw new JSQLParserException("Failed to find column with name " + columnName + " of table/view " + tableOrView);
-            }
-            final ColumnDto resultColumn = optionalColumn.get();
-            if (item.getAlias() != null) {
-                resultColumn.setAlias(item.getAlias().getName().replace("`", ""));
-            }
-            resultColumn.setDatabaseId(database.getId());
-            resultColumn.setTable(resultColumn.getTable());
-            resultColumn.setTableId(resultColumn.getTable().getId());
-            log.trace("found column with internal name {} and alias {}", resultColumn.getInternalName(), resultColumn.getAlias());
-            columns.add(resultColumn);
-        }
-        return columns;
-    }
-
-    default boolean tableMatches(net.sf.jsqlparser.schema.Table table, String otherTableName) {
-        final String tableName = table.getName()
-                .trim()
-                .replace("`", "");
-        if (table.getAlias() == null) {
-            /* table does not have designator */
-            log.trace("table '{}' has no designator", tableName);
-            return tableName.equals(otherTableName);
-        }
-        /* has designator */
-        final String designator = table.getAlias()
-                .getName()
-                .trim()
-                .replace("`", "");
-        log.trace("table '{}' has designator {}", tableName, designator);
-        return designator.equals(otherTableName);
-    }
-
-    default boolean columnMatches(ColumnDto column, String tableOrView) {
-        if (column.getTable() != null && column.getTable().getInternalName().equals(tableOrView)) {
-            log.trace("table '{}' found in column table", tableOrView);
-            return true;
-        }
-        if (column.getViews() == null) {
-            log.trace("table/view '{}' not found among column views: empty list", tableOrView);
-            return false;
-        }
-        /* maybe matches one of the other views */
-        final boolean found = column.getViews()
-                .stream()
-                .anyMatch(v -> v.getInternalName().equals(tableOrView));
-        if (!found) {
-            log.trace("table/view '{}' not found among column views: {}", tableOrView, column.getViews().stream().map(ViewDto::getInternalName).toList());
-        }
-        return found;
-    }
-
-    default List<FromItem> fromItemToFromItems(FromItem data) throws JSQLParserException {
-        return fromItemToFromItems(data, 0);
-    }
-
-    default List<FromItem> fromItemToFromItems(FromItem data, Integer level) throws JSQLParserException {
-        final List<FromItem> fromItems = new LinkedList<>();
-        if (data instanceof net.sf.jsqlparser.schema.Table table) {
-            fromItems.add(data);
-            log.trace("from-item {} is of type table: level ~> {}", table.getName(), level);
-            return fromItems;
-        }
-        if (data instanceof SubJoin subJoin) {
-            log.trace("from-item is of type sub-join: level ~> {}", level);
-            for (Join join : subJoin.getJoinList()) {
-                final List<FromItem> tmp = fromItemToFromItems(join.getRightItem(), level + 1);
-                if (tmp == null) {
-                    log.error("Failed to find right sub-join table: {}", join.getRightItem());
-                    throw new JSQLParserException("Failed to find right sub-join table");
-                }
-                fromItems.addAll(tmp);
-            }
-            final List<FromItem> tmp = fromItemToFromItems(subJoin.getLeft(), level + 1);
-            if (tmp == null) {
-                log.error("Failed to find left sub-join table: {}", subJoin.getLeft());
-                throw new JSQLParserException("Failed to find left sub-join table");
-            }
-            fromItems.addAll(tmp);
-            return fromItems;
-        }
-        log.warn("unknown from-item {}", data);
-        return null;
-    }
-
-    default QueryDto resultSetToQueryDto(@NotNull ResultSet data) throws SQLException, QueryNotFoundException {
-        /* note that next() is called outside this mapping function */
-        return QueryDto.builder()
-                .id(data.getLong(1))
-                .created(LocalDateTime.parse(data.getString(2), mariaDbFormatter)
-                        .atZone(ZoneId.of("UTC"))
-                        .toInstant())
-                .createdBy(UUID.fromString(data.getString(3)))
-                .query(data.getString(4))
-                .queryHash(data.getString(5))
-                .resultHash(data.getString(6))
-                .resultNumber(data.getLong(7))
-                .isPersisted(data.getBoolean(8))
-                .execution(LocalDateTime.parse(data.getString(9), mariaDbFormatter)
-                        .atZone(ZoneId.of("UTC"))
-                        .toInstant())
-                .build();
-    }
-
     default String selectRawSelectQuery(String query, Instant timestamp, Long page, Long size) {
         query = query.toLowerCase(Locale.ROOT)
                 .trim();
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MetadataMapper.java b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MetadataMapper.java
index 4cde78c7d913108bdf9d3d0d1b13c541ca44724d..fca56314af224c36719d7c5b424b47df50893033 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MetadataMapper.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/mapper/MetadataMapper.java
@@ -10,6 +10,7 @@ import at.tuwien.api.database.internal.PrivilegedDatabaseDto;
 import at.tuwien.api.database.internal.PrivilegedViewDto;
 import at.tuwien.api.database.table.TableBriefDto;
 import at.tuwien.api.database.table.TableDto;
+import at.tuwien.api.database.table.columns.ColumnBriefDto;
 import at.tuwien.api.database.table.columns.ColumnDto;
 import at.tuwien.api.database.table.internal.PrivilegedTableDto;
 import at.tuwien.api.user.PrivilegedUserDto;
@@ -33,9 +34,6 @@ public interface MetadataMapper {
 
     ViewColumnDto columnDtoToViewColumnDto(ColumnDto data);
 
-    /* keep */
-    TableBriefDto tableDtoToTableBriefDto(TableDto data);
-
     @Mappings({
             @Mapping(target = "database", expression = "java(PrivilegedDatabaseDto.builder().container(PrivilegedContainerDto.builder().image(new ImageDto()).build()).build())")
     })
@@ -47,4 +45,9 @@ public interface MetadataMapper {
 
     PrivilegedUserDto userDtoToPrivilegedUserDto(UserDto data);
 
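+    /* moved from MariaDbMapper: the table entity exposes its database id as "tdbid" */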
+    @Mappings({
+            @Mapping(target = "databaseId", source = "tdbid")
+    })
+    TableBriefDto tableDtoToTableBriefDto(TableDto data);
+
 }
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/SchemaService.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/SchemaService.java
index 2319d16b3974feaa206ab99235c3cc43b92579c8..f5ef05b44aed27a8fb43dbd123b5096e36c2283d 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/SchemaService.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/SchemaService.java
@@ -9,8 +9,25 @@ import java.sql.SQLException;
 
 public interface SchemaService {
 
+    /**
+     * Inspects the schema (columns with names and data types; unique, check, primary key and
+     * foreign key constraints) of the table with the given name in the given database.
+     * @param database The database.
+     * @param tableName The table name.
+     * @return The inspected table if successful.
+     * @throws SQLException The connection to the database could not be established.
+     * @throws TableNotFoundException The table was not found in the given database.
+     */
     TableDto inspectTable(PrivilegedDatabaseDto database, String tableName) throws SQLException,
-            QueryMalformedException, TableNotFoundException;
+            TableNotFoundException;
 
-    ViewDto inspectView(PrivilegedDatabaseDto database, String viewName) throws SQLException, ViewMalformedException, ViewNotFoundException, ViewSchemaException;
+    /**
+     * Inspects the schema (columns with names and data types) of the view with the given name
+     * in the given database.
+     * @param database The database.
+     * @param viewName The view name.
+     * @return The inspected view if successful.
+     * @throws SQLException The connection to the database could not be established.
+     * @throws ViewNotFoundException The view was not found in the given database.
+     */
+    ViewDto inspectView(PrivilegedDatabaseDto database, String viewName) throws SQLException, ViewNotFoundException;
 }
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/TableService.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/TableService.java
index fb045b4a19217d968fdf27e4753c2596bc0f9431..98ed0ec7aea96a0c757d480d2829e2dfb82a2665 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/TableService.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/TableService.java
@@ -21,11 +21,10 @@ public interface TableService {
      * @return List of tables, if successful.
      * @throws SQLException Failed to parse the SQL query because it contains invalid syntax.
      * @throws TableNotFoundException The table could not be inspected in the data database.
-     * @throws QueryMalformedException The inspection query is malformed.
      * @throws DatabaseMalformedException The database inspection was unsuccessful, likely due to a bug in the mapping.
      */
     List<TableDto> getSchemas(PrivilegedDatabaseDto database) throws SQLException, TableNotFoundException,
-            QueryMalformedException, DatabaseMalformedException;
+            DatabaseMalformedException;
 
     /**
      * Generates a table statistic for the given table. Only numerical columns are considered.
@@ -59,10 +58,9 @@ public interface TableService {
      * @throws TableNotFoundException The table could not be inspected in the data database.
      * @throws TableExistsException The table name already exists in the information_schema.
-     * @throws QueryMalformedException The create/inspection query is malformed.
      */
     TableDto createTable(PrivilegedDatabaseDto database, TableCreateDto data) throws SQLException,
-            TableMalformedException, TableExistsException, TableNotFoundException, QueryMalformedException;
+            TableMalformedException, TableExistsException, TableNotFoundException;
 
     /**
      * Drops a table in given table object.
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SchemaServiceMariaDbImpl.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SchemaServiceMariaDbImpl.java
index 537c4878a4f44a1b47e39278b3233f03448cf439..cc5840080b21ae8549632db7123e12ec72b1ee30 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SchemaServiceMariaDbImpl.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SchemaServiceMariaDbImpl.java
@@ -1,17 +1,14 @@
 package at.tuwien.service.impl;
 
 import at.tuwien.api.database.DatabaseDto;
-import at.tuwien.api.database.ViewColumnDto;
 import at.tuwien.api.database.ViewDto;
 import at.tuwien.api.database.internal.PrivilegedDatabaseDto;
 import at.tuwien.api.database.table.TableDto;
-import at.tuwien.api.database.table.columns.ColumnDto;
 import at.tuwien.api.database.table.constraints.unique.UniqueDto;
 import at.tuwien.config.QueryConfig;
 import at.tuwien.exception.TableNotFoundException;
-import at.tuwien.exception.ViewMalformedException;
 import at.tuwien.exception.ViewNotFoundException;
-import at.tuwien.exception.ViewSchemaException;
+import at.tuwien.mapper.DataMapper;
 import at.tuwien.mapper.MariaDbMapper;
 import at.tuwien.mapper.MetadataMapper;
 import at.tuwien.service.SchemaService;
@@ -25,20 +22,20 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.LinkedList;
-import java.util.List;
-import java.util.Optional;
 
 @Log4j2
 @Service
 public class SchemaServiceMariaDbImpl extends HibernateConnector implements SchemaService {
 
+    private final DataMapper dataMapper;
     private final QueryConfig queryConfig;
     private final MariaDbMapper mariaDbMapper;
     private final MetadataMapper metadataMapper;
 
     @Autowired
-    public SchemaServiceMariaDbImpl(QueryConfig queryConfig, MariaDbMapper mariaDbMapper,
+    public SchemaServiceMariaDbImpl(DataMapper dataMapper, QueryConfig queryConfig, MariaDbMapper mariaDbMapper,
                                     MetadataMapper metadataMapper) {
+        this.dataMapper = dataMapper;
         this.queryConfig = queryConfig;
         this.mariaDbMapper = mariaDbMapper;
         this.metadataMapper = metadataMapper;
@@ -56,7 +53,7 @@ public class SchemaServiceMariaDbImpl extends HibernateConnector implements Sche
             statement1.setString(1, database.getInternalName());
             statement1.setString(2, tableName);
             log.trace("1={}, 2={}", database.getInternalName(), tableName);
-            TableDto table = mariaDbMapper.schemaResultSetToTable(metadataMapper.privilegedDatabaseDtoToDatabaseDto(database), statement1.executeQuery());
+            TableDto table = dataMapper.schemaResultSetToTable(metadataMapper.privilegedDatabaseDtoToDatabaseDto(database), statement1.executeQuery());
             /* obtain columns metadata */
             final PreparedStatement statement2 = connection.prepareStatement(mariaDbMapper.databaseTableColumnsSelectRawQuery());
             statement2.setString(1, database.getInternalName());
@@ -64,7 +61,7 @@ public class SchemaServiceMariaDbImpl extends HibernateConnector implements Sche
             log.trace("1={}, 2={}", database.getInternalName(), tableName);
             final ResultSet resultSet2 = statement2.executeQuery();
             while (resultSet2.next()) {
-                table = mariaDbMapper.resultSetToTable(resultSet2, table, queryConfig);
+                table = dataMapper.resultSetToTable(resultSet2, table, queryConfig);
             }
             /* obtain check constraints metadata */
             final PreparedStatement statement3 = connection.prepareStatement(mariaDbMapper.columnsCheckConstraintSelectRawQuery());
@@ -86,7 +83,7 @@ public class SchemaServiceMariaDbImpl extends HibernateConnector implements Sche
             log.trace("1={}, 2={}", database.getInternalName(), tableName);
             final ResultSet resultSet4 = statement4.executeQuery();
             while (resultSet4.next()) {
-                table = mariaDbMapper.resultSetToConstraint(resultSet4, table);
+                table = dataMapper.resultSetToConstraint(resultSet4, table);
                 for (UniqueDto uk : table.getConstraints().getUniques()) {
                     uk.setTable(metadataMapper.tableDtoToTableBriefDto(table));
                     final TableDto tmpTable = table;
@@ -133,7 +130,7 @@ public class SchemaServiceMariaDbImpl extends HibernateConnector implements Sche
             if (!resultSet1.next()) {
                 throw new ViewNotFoundException("Failed to find view in the information schema");
             }
-            ViewDto view = mariaDbMapper.schemaResultSetToView(metadataMapper.privilegedDatabaseDtoToDatabaseDto(privilegedDatabase), resultSet1);
+            ViewDto view = dataMapper.schemaResultSetToView(metadataMapper.privilegedDatabaseDtoToDatabaseDto(privilegedDatabase), resultSet1);
             view.setDatabase(database);
             view.setVdbid(database.getId());
             view.setCreator(database.getCreator());
@@ -148,7 +145,7 @@ public class SchemaServiceMariaDbImpl extends HibernateConnector implements Sche
                     .columns(new LinkedList<>())
                     .build();
             while (resultSet2.next()) {
-                tmp = mariaDbMapper.resultSetToTable(resultSet2, tmp, queryConfig);
+                tmp = dataMapper.resultSetToTable(resultSet2, tmp, queryConfig);
             }
             view.setColumns(tmp.getColumns()
                     .stream()
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SubsetServiceMariaDbImpl.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SubsetServiceMariaDbImpl.java
index d298f2fada278f1eba060b030fbcc1040324adc1..2ab2f7b349fe6cf2ba2129679a22024e9167c0fc 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SubsetServiceMariaDbImpl.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/SubsetServiceMariaDbImpl.java
@@ -14,6 +14,7 @@ import at.tuwien.config.S3Config;
 import at.tuwien.exception.*;
 import at.tuwien.gateway.DataDatabaseSidecarGateway;
 import at.tuwien.gateway.MetadataServiceGateway;
+import at.tuwien.mapper.DataMapper;
 import at.tuwien.mapper.MariaDbMapper;
 import at.tuwien.mapper.MetadataMapper;
 import at.tuwien.service.SubsetService;
@@ -35,6 +36,7 @@ import java.util.UUID;
 public class SubsetServiceMariaDbImpl extends HibernateConnector implements SubsetService {
 
     private final S3Config s3Config;
+    private final DataMapper dataMapper;
     private final MariaDbMapper mariaDbMapper;
     private final MetadataMapper metadataMapper;
     private final StorageService storageService;
@@ -42,10 +44,12 @@ public class SubsetServiceMariaDbImpl extends HibernateConnector implements Subs
     private final DataDatabaseSidecarGateway dataDatabaseSidecarGateway;
 
     @Autowired
-    public SubsetServiceMariaDbImpl(S3Config s3Config, MariaDbMapper mariaDbMapper, MetadataMapper metadataMapper,
-                                    StorageService storageService, MetadataServiceGateway metadataServiceGateway,
+    public SubsetServiceMariaDbImpl(S3Config s3Config, DataMapper dataMapper, MariaDbMapper mariaDbMapper,
+                                    MetadataMapper metadataMapper, StorageService storageService,
+                                    MetadataServiceGateway metadataServiceGateway,
                                     DataDatabaseSidecarGateway dataDatabaseSidecarGateway) {
         this.s3Config = s3Config;
+        this.dataMapper = dataMapper;
         this.mariaDbMapper = mariaDbMapper;
         this.metadataMapper = metadataMapper;
         this.storageService = storageService;
@@ -97,7 +101,7 @@ public class SubsetServiceMariaDbImpl extends HibernateConnector implements Subs
             SQLException {
         final List<ColumnDto> columns;
         try {
-            columns = mariaDbMapper.parseColumns(metadataMapper.privilegedDatabaseDtoToDatabaseDto(database), query.getQuery());
+            columns = dataMapper.parseColumns(metadataMapper.privilegedDatabaseDtoToDatabaseDto(database), query.getQuery());
         } catch (JSQLParserException e) {
             log.error("Failed to map/parse columns: {}", e.getMessage());
             throw new TableMalformedException("Failed to map/parse columns: " + e.getMessage(), e);
@@ -129,7 +133,7 @@ public class SubsetServiceMariaDbImpl extends HibernateConnector implements Subs
             final ResultSet resultSet = statement.executeQuery();
             final List<QueryDto> queries = new LinkedList<>();
             while (resultSet.next()) {
-                final QueryDto query = mariaDbMapper.resultSetToQueryDto(resultSet);
+                final QueryDto query = dataMapper.resultSetToQueryDto(resultSet);
                 query.setIdentifiers(identifiers.stream()
                         .filter(i -> i.getType().equals(IdentifierTypeDto.SUBSET))
                         .filter(i -> i.getQueryId().equals(query.getId()))
@@ -176,7 +180,7 @@ public class SubsetServiceMariaDbImpl extends HibernateConnector implements Subs
         try {
             final PreparedStatement preparedStatement = connection.prepareStatement(statement);
             final ResultSet resultSet = preparedStatement.executeQuery();
-            return mariaDbMapper.resultListToQueryResultDto(columns, resultSet);
+            return dataMapper.resultListToQueryResultDto(columns, resultSet);
         } catch (SQLException e) {
             log.error("Failed to execute and map time-versioned query: {}", e.getMessage());
             throw new TableMalformedException("Failed to execute and map time-versioned query: " + e.getMessage(), e);
@@ -214,7 +218,7 @@ public class SubsetServiceMariaDbImpl extends HibernateConnector implements Subs
             if (!resultSet.next()) {
                 throw new QueryNotFoundException("Failed to find query");
             }
-            final QueryDto query = mariaDbMapper.resultSetToQueryDto(resultSet);
+            final QueryDto query = dataMapper.resultSetToQueryDto(resultSet);
             query.setIdentifiers(metadataServiceGateway.getIdentifiers(database.getId(), queryId));
             final UserDto creator = metadataServiceGateway.getUserById(query.getCreatedBy());
             log.debug("retrieved creator from metadata service: creator.id={}, creator.username={}", creator.getId(), creator.getUsername());
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/TableServiceMariaDbImpl.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/TableServiceMariaDbImpl.java
index 55e96c516185699e6c17e8e58ed43de1637da1bf..be15d46895d37f665120f58b047ae52fb7d7742f 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/TableServiceMariaDbImpl.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/TableServiceMariaDbImpl.java
@@ -13,6 +13,7 @@ import at.tuwien.api.database.table.internal.TableCreateDto;
 import at.tuwien.config.S3Config;
 import at.tuwien.exception.*;
 import at.tuwien.gateway.DataDatabaseSidecarGateway;
+import at.tuwien.mapper.DataMapper;
 import at.tuwien.mapper.MariaDbMapper;
 import at.tuwien.service.SchemaService;
 import at.tuwien.service.StorageService;
@@ -33,16 +34,18 @@ import java.util.*;
 public class TableServiceMariaDbImpl extends HibernateConnector implements TableService {
 
     private final S3Config s3Config;
+    private final DataMapper dataMapper;
     private final MariaDbMapper mariaDbMapper;
     private final SchemaService schemaService;
     private final StorageService storageService;
     private final DataDatabaseSidecarGateway dataDatabaseSidecarGateway;
 
     @Autowired
-    public TableServiceMariaDbImpl(S3Config s3Config, MariaDbMapper mariaDbMapper, SchemaService schemaService,
-                                   StorageService storageService,
+    public TableServiceMariaDbImpl(S3Config s3Config, DataMapper dataMapper, MariaDbMapper mariaDbMapper,
+                                   SchemaService schemaService, StorageService storageService,
                                    DataDatabaseSidecarGateway dataDatabaseSidecarGateway) {
         this.s3Config = s3Config;
+        this.dataMapper = dataMapper;
         this.mariaDbMapper = mariaDbMapper;
         this.schemaService = schemaService;
         this.storageService = storageService;
@@ -51,7 +54,7 @@ public class TableServiceMariaDbImpl extends HibernateConnector implements Table
 
     @Override
     public List<TableDto> getSchemas(PrivilegedDatabaseDto database) throws SQLException, TableNotFoundException,
-            QueryMalformedException, DatabaseMalformedException {
+            DatabaseMalformedException {
         final ComboPooledDataSource dataSource = getPrivilegedDataSource(database);
         final Connection connection = dataSource.getConnection();
         final List<TableDto> tables = new LinkedList<>();
@@ -91,7 +94,7 @@ public class TableServiceMariaDbImpl extends HibernateConnector implements Table
             /* obtain statistic */
             final ResultSet resultSet = connection.prepareStatement(mariaDbMapper.tableColumnStatisticsSelectRawQuery(table.getColumns(), table.getInternalName()))
                     .executeQuery();
-            statistic = mariaDbMapper.resultSetToTableStatistic(resultSet);
+            statistic = dataMapper.resultSetToTableStatistic(resultSet);
             statistic.setRows(getCount(table, null));
         } catch (SQLException e) {
             connection.rollback();
@@ -109,14 +112,13 @@ public class TableServiceMariaDbImpl extends HibernateConnector implements Table
     }
 
     @Override
-    public TableDto find(PrivilegedDatabaseDto database, String tableName) throws TableNotFoundException, SQLException,
-            QueryMalformedException {
+    public TableDto find(PrivilegedDatabaseDto database, String tableName) throws TableNotFoundException, SQLException {
         return schemaService.inspectTable(database, tableName);
     }
 
     @Override
     public TableDto createTable(PrivilegedDatabaseDto database, TableCreateDto data) throws SQLException,
-            TableMalformedException, TableExistsException, TableNotFoundException, QueryMalformedException {
+            TableMalformedException, TableExistsException, TableNotFoundException {
         final String tableName = mariaDbMapper.nameToInternalName(data.getName());
         final ComboPooledDataSource dataSource = getPrivilegedDataSource(database);
         final Connection connection = dataSource.getConnection();
@@ -181,7 +183,7 @@ public class TableServiceMariaDbImpl extends HibernateConnector implements Table
                             timestamp, size, page))
                     .executeQuery();
             connection.commit();
-            queryResult = mariaDbMapper.resultListToQueryResultDto(table.getColumns(), resultSet);
+            queryResult = dataMapper.resultListToQueryResultDto(table.getColumns(), resultSet);
         } catch (SQLException e) {
             connection.rollback();
             log.error("Failed to find data from table {}.{}: {}", table.getDatabase().getInternalName(), table.getInternalName(), e.getMessage());
@@ -205,7 +207,7 @@ public class TableServiceMariaDbImpl extends HibernateConnector implements Table
             final ResultSet resultSet = connection.prepareStatement(mariaDbMapper.selectHistoryRawQuery(
                             table.getDatabase().getInternalName(), table.getInternalName(), size))
                     .executeQuery();
-            history = mariaDbMapper.resultSetToTableHistory(resultSet);
+            history = dataMapper.resultSetToTableHistory(resultSet);
             connection.commit();
         } catch (SQLException e) {
             connection.rollback();
diff --git a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/ViewServiceMariaDbImpl.java b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/ViewServiceMariaDbImpl.java
index c85f5bfbdba9ffbe83d512a37f3f5cdebf5e4c1d..6f88c409737605671eb31e8b151e9a35ae23afa5 100644
--- a/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/ViewServiceMariaDbImpl.java
+++ b/dbrepo-data-service/services/src/main/java/at/tuwien/service/impl/ViewServiceMariaDbImpl.java
@@ -11,6 +11,7 @@ import at.tuwien.config.QueryConfig;
 import at.tuwien.config.S3Config;
 import at.tuwien.exception.*;
 import at.tuwien.gateway.DataDatabaseSidecarGateway;
+import at.tuwien.mapper.DataMapper;
 import at.tuwien.mapper.MariaDbMapper;
 import at.tuwien.mapper.MetadataMapper;
 import at.tuwien.service.SchemaService;
@@ -37,6 +38,7 @@ import java.util.List;
 public class ViewServiceMariaDbImpl extends HibernateConnector implements ViewService {
 
     private final S3Config s3Config;
+    private final DataMapper dataMapper;
     private final QueryConfig queryConfig;
     private final MariaDbMapper mariaDbMapper;
     private final SchemaService schemaService;
@@ -45,11 +47,12 @@ public class ViewServiceMariaDbImpl extends HibernateConnector implements ViewSe
     private final DataDatabaseSidecarGateway dataDatabaseSidecarGateway;
 
     @Autowired
-    public ViewServiceMariaDbImpl(S3Config s3Config, QueryConfig queryConfig, MariaDbMapper mariaDbMapper,
-                                  SchemaService schemaService, StorageService storageService,
-                                  MetadataMapper metadataMapper,
+    public ViewServiceMariaDbImpl(S3Config s3Config, DataMapper dataMapper, QueryConfig queryConfig,
+                                  MariaDbMapper mariaDbMapper, SchemaService schemaService,
+                                  StorageService storageService, MetadataMapper metadataMapper,
                                   DataDatabaseSidecarGateway dataDatabaseSidecarGateway) {
         this.s3Config = s3Config;
+        this.dataMapper = dataMapper;
         this.queryConfig = queryConfig;
         this.mariaDbMapper = mariaDbMapper;
         this.schemaService = schemaService;
@@ -122,7 +125,7 @@ public class ViewServiceMariaDbImpl extends HibernateConnector implements ViewSe
             statement2.setString(2, view.getInternalName());
             final ResultSet resultSet2 = statement2.executeQuery();
             while (resultSet2.next()) {
-                view = mariaDbMapper.resultSetToTable(resultSet2, view, queryConfig);
+                view = dataMapper.resultSetToTable(resultSet2, view, queryConfig);
             }
             connection.commit();
         } catch (SQLException e) {
@@ -152,7 +155,7 @@ public class ViewServiceMariaDbImpl extends HibernateConnector implements ViewSe
                             mariaDbMapper.selectDatasetRawQuery(view.getDatabase().getInternalName(),
                                     view.getInternalName(), mappedColumns, timestamp, size, page))
                     .executeQuery();
-            queryResult = mariaDbMapper.resultListToQueryResultDto(mappedColumns, resultSet);
+            queryResult = dataMapper.resultListToQueryResultDto(mappedColumns, resultSet);
             queryResult.setId(view.getId());
             connection.commit();
         } catch (SQLException e) {
diff --git a/dbrepo-metadata-db/Dockerfile b/dbrepo-metadata-db/Dockerfile
deleted file mode 100644
index dab74c702c6cab912ed060e9cc92a3d74b1e66c8..0000000000000000000000000000000000000000
--- a/dbrepo-metadata-db/Dockerfile
+++ /dev/null
@@ -1,6 +0,0 @@
-FROM bitnami/mariadb:11.2.2-debian-11-r0 as runtime
-
-ENV MARIADB_DATABASE=fda
-ENV MARIADB_ROOT_PASSWORD=dbrepo
-
-COPY ./setup-schema.sql /docker-entrypoint-initdb.d/setup-schema.sql
\ No newline at end of file
diff --git a/dbrepo-metadata-db/migrate_1.4.0-1.4.1.sql b/dbrepo-metadata-db/migrate_1.4.0-1.4.1.sql
deleted file mode 100644
index a849d52476bae19b896c710432f511efafd4ebf6..0000000000000000000000000000000000000000
--- a/dbrepo-metadata-db/migrate_1.4.0-1.4.1.sql
+++ /dev/null
@@ -1,19 +0,0 @@
-ALTER TABLE mdb_databases DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_databases ADD COLUMN image longblob;
-ALTER TABLE mdb_databases ADD SYSTEM VERSIONING;
-ALTER TABLE mdb_tables DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_tables ADD COLUMN processed_constraints BOOLEAN NOT NULL DEFAULT false;
-ALTER TABLE mdb_tables ADD SYSTEM VERSIONING;
-ALTER TABLE mdb_columns DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_columns DROP COLUMN alias;
-ALTER TABLE mdb_columns ADD SYSTEM VERSIONING;
-ALTER TABLE mdb_constraints_foreign_key DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_constraints_foreign_key ADD COLUMN name VARCHAR(255) NOT NULL;
-ALTER TABLE mdb_constraints_foreign_key ADD SYSTEM VERSIONING;
-ALTER TABLE mdb_constraints_unique DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_constraints_unique ADD COLUMN name VARCHAR(255) NOT NULL;
-ALTER TABLE mdb_constraints_unique ADD SYSTEM VERSIONING;
-ALTER TABLE mdb_view_columns DROP SYSTEM VERSIONING;
-ALTER TABLE mdb_view_columns ADD COLUMN alias VARCHAR(100);
-ALTER TABLE mdb_view_columns CHANGE COLUMN position ordinal_position INTEGER;
-ALTER TABLE mdb_view_columns ADD SYSTEM VERSIONING;
\ No newline at end of file
diff --git a/dbrepo-metadata-service/api/src/main/java/at/tuwien/api/database/table/TupleDto.java b/dbrepo-metadata-service/api/src/main/java/at/tuwien/api/database/table/TupleDto.java
index 88170c4e0f4e2ec5f805b2b045b7f9a0f65b5c3e..a428cb572604815ae80ab9f47f28965a7a784a69 100644
--- a/dbrepo-metadata-service/api/src/main/java/at/tuwien/api/database/table/TupleDto.java
+++ b/dbrepo-metadata-service/api/src/main/java/at/tuwien/api/database/table/TupleDto.java
@@ -16,7 +16,7 @@ import java.util.Map;
 @ToString
 public class TupleDto {
 
-    @NotNull(message = "data is required")
+    @NotNull
     private Map<String, Object> data;
 
 }
diff --git a/dbrepo-metadata-service/services/src/main/java/at/tuwien/config/JacksonConfig.java b/dbrepo-metadata-service/services/src/main/java/at/tuwien/config/JacksonConfig.java
index a451032e9de8d40f04724e44e85eb928dfba2653..61e7f2b180f15497b08c93ad8e474dc6a7722336 100644
--- a/dbrepo-metadata-service/services/src/main/java/at/tuwien/config/JacksonConfig.java
+++ b/dbrepo-metadata-service/services/src/main/java/at/tuwien/config/JacksonConfig.java
@@ -25,7 +25,6 @@ public class JacksonConfig {
         objectMapper.registerModule(new Hibernate6Module()); /* lazy load mapping on REST endpoints */
         objectMapper.configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false);
         objectMapper.setTimeZone(TimeZone.getTimeZone("UTC"));
-        log.debug("current time is {}", objectMapper.writeValueAsString(new Date()));
         return objectMapper;
     }
 
diff --git a/dbrepo-search-service/Pipfile.lock b/dbrepo-search-service/Pipfile.lock
index 3388956b20ba3f223fdf3cdeb3eab5f1a5337022..7117575adf21fb3bc4458a54cbb57c49c3e732ee 100644
--- a/dbrepo-search-service/Pipfile.lock
+++ b/dbrepo-search-service/Pipfile.lock
@@ -929,11 +929,11 @@
         },
         "pydantic": {
             "hashes": [
-                "sha256:c46c76a40bb1296728d7a8b99aa73dd70a48c3510111ff290034f860c99c419e",
-                "sha256:ea91b002777bf643bb20dd717c028ec43216b24a6001a280f83877fd2655d0b4"
+                "sha256:0c84efd9548d545f63ac0060c1e4d39bb9b14db8b3c0652338aecc07b5adec52",
+                "sha256:ee8538d41ccb9c0a9ad3e0e5f07bf15ed8015b481ced539a1759d8cc89ae90d0"
             ],
             "markers": "python_version >= '3.8'",
-            "version": "==2.7.3"
+            "version": "==2.7.4"
         },
         "pydantic-core": {
             "hashes": [
diff --git a/dbrepo-search-service/lib/dbrepo-1.4.4-py3-none-any.whl b/dbrepo-search-service/lib/dbrepo-1.4.4-py3-none-any.whl
index 503cfef91315990bbf06027d6de14c8b3184507b..7e8fd7fca5aa6158bf57952f7f1050a08b331402 100644
Binary files a/dbrepo-search-service/lib/dbrepo-1.4.4-py3-none-any.whl and b/dbrepo-search-service/lib/dbrepo-1.4.4-py3-none-any.whl differ
diff --git a/dbrepo-search-service/lib/dbrepo-1.4.4.tar.gz b/dbrepo-search-service/lib/dbrepo-1.4.4.tar.gz
index 9a90176f0a093b05d89d1cd74cf701cd0730861a..3e45d4513a31a1fda334ed9ad2c5cbad3803199a 100644
Binary files a/dbrepo-search-service/lib/dbrepo-1.4.4.tar.gz and b/dbrepo-search-service/lib/dbrepo-1.4.4.tar.gz differ
diff --git a/dbrepo-ui/components/identifier/Citation.vue b/dbrepo-ui/components/identifier/Citation.vue
index ca5d2da00fcfa2a5d8d64403fdb606896f7a1c4f..8cd96902d48009da22cd761849be1962ba07c248 100644
--- a/dbrepo-ui/components/identifier/Citation.vue
+++ b/dbrepo-ui/components/identifier/Citation.vue
@@ -63,8 +63,12 @@ export default {
       identifierService.findOne(this.identifier.id, accept)
         .then((citation) => {
           this.citation = citation
+          this.loading = false
         })
-        .finally(() => {
+        .catch(({code, message}) => {
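+          // surface the failure to the user via a toast and stop the loading spinner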
+          const toast = useToastInstance()
+          toast.error(this.$t(`${code}: ${message}`))
           this.loading = false
         })
     }
diff --git a/dbrepo-ui/locales/en-US.json b/dbrepo-ui/locales/en-US.json
index a14d7292831b1f2e81009be3d39f9051b99c6b8d..a345e0bec5362890064c770ccb96ec13eb431295 100644
--- a/dbrepo-ui/locales/en-US.json
+++ b/dbrepo-ui/locales/en-US.json
@@ -35,6 +35,9 @@
   "pages": {
     "identifier": {
       "title": "Identifier",
+      "export": {
+        "text": "Metadata Export"
+      },
       "pid": {
         "title": "Persistent Identifier"
       },
@@ -1209,10 +1212,6 @@
       "created": "Successfully created table",
       "semantics": "Successfully assigned semantic instance"
     },
-    "schema": {
-      "tables": "Successfully refreshed database tables metadata",
-      "views": "Successfully refreshed database views metadata"
-    },
     "schema": {
       "tables": "Successfully refreshed database tables metadata.",
       "views": "Successfully refreshed database views metadata."
diff --git a/docker-compose.yml b/docker-compose.yml
index 48d62373b213f20fcaf4443ebbb12437d4bf2f02..65d23f7e45b36a82df4cad73cccf6424b9413e61 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -14,13 +14,11 @@ services:
     restart: "no"
     container_name: dbrepo-metadata-db
     hostname: metadata-db
-    image: dbrepo-metadata-db:latest
-    build:
-      context: ./dbrepo-metadata-db
-      network: host
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - metadata-db-data:/bitnami/mariadb
-      - ./dbrepo-metadata-db/setup-data.sql:/docker-entrypoint-initdb.d/setup-schema_local.sql
+      - ./dbrepo-metadata-db/setup-schema.sql:/docker-entrypoint-initdb.d/1_setup-schema.sql
+      - ./dbrepo-metadata-db/setup-data.sql:/docker-entrypoint-initdb.d/2_setup-data.sql
     ports:
       - "3306:3306"
     environment:
@@ -38,7 +36,7 @@ services:
     restart: "no"
     container_name: dbrepo-data-db
     hostname: data-db
-    image: docker.io/bitnami/mariadb-galera:11.2.2-debian-11-r0
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - data-db-data:/bitnami/mariadb
       - "${SHARED_VOLUME:-/tmp}:/tmp"
@@ -46,7 +44,6 @@ services:
       - "3307:3306"
     environment:
       MARIADB_ROOT_PASSWORD: "${USER_DB_PASSWORD:-dbrepo}"
-      MARIADB_GALERA_MARIABACKUP_PASSWORD: "${USER_DB_BACKUP_PASSWORD:-dbrepo}"
     healthcheck:
       test: mysqladmin ping --user="${USER_DB_USERNAME:-root}" --password="${USER_DB_PASSWORD:-dbrepo}" --silent
       interval: 10s
@@ -59,7 +56,7 @@ services:
     restart: "no"
     container_name: dbrepo-auth-db
     hostname: auth-db
-    image: docker.io/bitnami/mariadb:11.2.2-debian-11-r0
+    image: docker.io/bitnami/mariadb:11.1.3-debian-11-r6
     volumes:
       - auth-db-data:/bitnami/mariadb
     ports:
@@ -331,7 +328,7 @@ services:
     restart: "no"
     container_name: dbrepo-gateway-service
     hostname: gateway-service
-    image: docker.io/nginx:1.25-alpine-slim
+    image: docker.io/nginx:1.27.0-alpine3.19-slim
     ports:
       - "80:80"
       - "443:443"
diff --git a/helm/dbrepo/Chart.lock b/helm/dbrepo/Chart.lock
index e7fbf0ea099942650d029a991cf3ca490ef1f11b..dd42ade0c3ffaa28c6d562fc46f30dafc81a7ecf 100644
--- a/helm/dbrepo/Chart.lock
+++ b/helm/dbrepo/Chart.lock
@@ -1,16 +1,16 @@
 dependencies:
 - name: opensearch
-  repository: https://opensearch-project.github.io/helm-charts/
-  version: 2.15.0
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.2.2
 - name: keycloak
   repository: https://charts.bitnami.com/bitnami
   version: 17.3.3
-- name: mariadb-galera
+- name: mariadb
   repository: https://charts.bitnami.com/bitnami
-  version: 11.0.1
-- name: mariadb-galera
+  version: 14.1.4
+- name: mariadb
   repository: https://charts.bitnami.com/bitnami
-  version: 11.0.1
+  version: 14.1.4
 - name: rabbitmq
   repository: https://charts.bitnami.com/bitnami
   version: 14.0.0
@@ -20,5 +20,5 @@ dependencies:
 - name: tusd
   repository: https://charts.sagikazarmark.dev
   version: 0.1.2
-digest: sha256:f724e33944ae5284b9417a3424a4af9cd67eb8bea0baa0ebeddc76f4c0c9c63a
-generated: "2024-05-17T21:25:35.919266246+02:00"
+digest: sha256:867a4a60bbccfaeb880d000eeb634db20554ef91523aa3b1331c53bdf48e8db4
+generated: "2024-06-14T15:12:25.44560113+02:00"
diff --git a/helm/dbrepo/Chart.yaml b/helm/dbrepo/Chart.yaml
index 24e580a29731861c53e63192aa5346531260c135..4838a04ed0bbb9327b899097c22e01960760fee2 100644
--- a/helm/dbrepo/Chart.yaml
+++ b/helm/dbrepo/Chart.yaml
@@ -10,28 +10,28 @@ keywords:
   - dbrepo
 maintainers:
   - name: Martin Weise
-    email: martin.weise@tuwien.ac.at
+    email: martin.weise@tuwien.ac.at
 home: https://www.ifs.tuwien.ac.at/infrastructures/dbrepo/
 icon: https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/master/dbrepo-ui/public/favicon.png
 dependencies:
   - name: opensearch
     alias: searchdb
-    version: 2.15.0
-    repository: https://opensearch-project.github.io/helm-charts/
+    version: 1.2.2
+    repository: https://charts.bitnami.com/bitnami
     condition: searchdb.enabled
   - name: keycloak
     alias: authservice
     version: 17.3.3
     repository: https://charts.bitnami.com/bitnami
     condition: authservice.enabled
-  - name: mariadb-galera
+  - name: mariadb
     alias: datadb
-    version: 11.0.1
+    version: 14.1.4
     repository: https://charts.bitnami.com/bitnami
     condition: datadb.enabled
-  - name: mariadb-galera
+  - name: mariadb
     alias: metadatadb
-    version: 11.0.1
+    version: 14.1.4
     repository: https://charts.bitnami.com/bitnami
     condition: metadatadb.enabled
   - name: rabbitmq
diff --git a/helm/dbrepo/README.md b/helm/dbrepo/README.md
index 01f699d9e1841eef85f4889248eef4c83bf4d49c..41109de1140b2dd42dc1c42daf19f7cb350918bd 100644
--- a/helm/dbrepo/README.md
+++ b/helm/dbrepo/README.md
@@ -10,7 +10,21 @@ sample [`values.yaml`](https://gitlab.phaidra.org/fair-data-austria-db-repositor
 for your deployment and update the variables, especially `hostname`.
 
 ```bash
-helm install my-release "oci://s210.dl.hpc.tuwien.ac.at/dbrepo/helm" --values ./values.yaml --version "1.4.4"
+helm install my-release "oci://registry.datalab.tuwien.ac.at/dbrepo/helm" --values ./values.yaml --version "1.4.4"
 ```
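+
+For example, a minimal `values.yaml` override could look like the following sketch.
+The `ingress` keys are documented in the parameter tables below; the exact location
+of the `hostname` key should be checked against the sample `values.yaml`:
+
+```yaml
+hostname: dbrepo.example.com
+ingress:
+  enabled: true
+  className: nginx
+  tls:
+    enabled: true
+    secretName: ingress-cert
+```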
 
 ## Prerequisites
@@ -27,7 +27,7 @@ helm install my-release "oci://s210.dl.hpc.tuwien.ac.at/dbrepo/helm" --values ./
 To install the chart with the release name `my-release`:
 
 ```bash
-helm install my-release "oci://s210.dl.hpc.tuwien.ac.at/dbrepo/helm" --values ./values.yaml --version "1.4.4"
+helm install my-release "oci://registry.datalab.tuwien.ac.at/dbrepo/helm" --values ./values.yaml --version "1.4.4"
 ```
 
 The command deploys DBRepo on the Kubernetes cluster in the default configuration. The Parameters section lists the
@@ -64,18 +64,18 @@ The command removes all the Kubernetes components associated with the chart and
 
 ### Metadata Database
 
-| Name                             | Description                                                      | Value         |
-| -------------------------------- | ---------------------------------------------------------------- | ------------- |
-| `metadatadb.enabled`             | Enable the Metadata Database.                                    | `true`        |
-| `metadatadb.image.debug`         | Set the logging level to `trace`. Otherwise, set to `info`.      | `false`       |
-| `metadatadb.host`                | The hostname for the microservices.                              | `metadata-db` |
-| `metadatadb.rootUser.user`       | The root username.                                               | `root`        |
-| `metadatadb.rootUser.password`   | The root user password.                                          | `dbrepo`      |
-| `metadatadb.jdbcExtraArgs`       | The extra arguments for JDBC connections in the microservices.   | `""`          |
-| `metadatadb.db.name`             | The database name.                                               | `fda`         |
-| `metadatadb.extraInitDbScripts`  | Additional init.db scripts that are executed on the first start. | `{}`          |
-| `metadatadb.persistence.enabled` | Enable persistent storage. Requires PV-provisioner.              | `false`       |
-| `metadatadb.replicaCount`        | The number of replicas, should be uneven (2n+1).                 | `3`           |
+| Name                                  | Description                                                      | Value         |
+| ------------------------------------- | ---------------------------------------------------------------- | ------------- |
+| `metadatadb.enabled`                  | Enable the Metadata Database.                                    | `true`        |
+| `metadatadb.host`                     | The hostname for the microservices.                              | `metadata-db` |
+| `metadatadb.auth.root`                | The root username.                                               | `root`        |
+| `metadatadb.auth.rootPassword`        | The root user password.                                          | `dbrepo`      |
+| `metadatadb.auth.database`            | The database name.                                               | `dbrepo`      |
+| `metadatadb.auth.replicationUser`     | The database replication username.                               | `replication` |
+| `metadatadb.auth.replicationPassword` | The database replication user password.                          | `replication` |
+| `metadatadb.jdbcExtraArgs`            | The extra arguments for JDBC connections in the microservices.   | `""`          |
+| `metadatadb.extraInitDbScripts`       | Additional init.db scripts that are executed on the first start. | `{}`          |
+| `metadatadb.secondary.replicaCount`   | The number of replicas of the secondary database pods.           | `2`           |
 
 ### Auth Service
 
@@ -89,33 +89,28 @@ The command removes all the Kubernetes components associated with the chart and
 | `authservice.jwt.pubkey`         | The JWT public key from the `dbrepo-client`.                 | `MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqqnHQ2BWWW9vDNLRCcxD++xZg/16oqMo/c1l+lcFEjjAIJjJp/HqrPYU/U9GvquGE6PbVFtTzW1KcKawOW+FJNOA3CGo8Q1TFEfz43B8rZpKsFbJKvQGVv1Z4HaKPvLUm7iMm8Hv91cLduuoWx6Q3DPe2vg13GKKEZe7UFghF+0T9u8EKzA/XqQ0OiICmsmYPbwvf9N3bCKsB/Y10EYmZRb8IhCoV9mmO5TxgWgiuNeCTtNCv2ePYqL/U0WvyGFW0reasIK8eg3KrAUj8DpyOgPOVBn3lBGf+3KFSYi+0bwZbJZWqbC/Xlk20Go1YfeJPRIt7ImxD27R/lNjgDO/MwIDAQAB` |
 | `authservice.tls.enabled`        | Enable TLS/SSL communication. Required for HTTPS.            | `true`                                                                                                                                                                                                                                                                                                                                                                                                     |
 | `authservice.tls.existingSecret` | The secret containing the `tls.crt`, `tls.key` and `ca.crt`. | `ingress-cert`                                                                                                                                                                                                                                                                                                                                                                                             |
-| `authservice.tls.usePem`         | Use PEM certificates as input instead of PKS12/JKS stores.   | `true`                                                                                                                                                                                                                                                                                                                                                                                                     |
 | `authservice.metrics.enabled`    | Enable the Prometheus metrics export sidecar container.      | `false`                                                                                                                                                                                                                                                                                                                                                                                                    |
 | `authservice.client.id`          | The client id for the microservices.                         | `dbrepo-client`                                                                                                                                                                                                                                                                                                                                                                                            |
 | `authservice.client.secret`      | The client secret for the microservices.                     | `MUwRc7yfXSJwX8AdRMWaQC3Nep1VjwgG`                                                                                                                                                                                                                                                                                                                                                                         |
 
 ### Data Database
 
-| Name                         | Description                                                 | Value    |
-| ---------------------------- | ----------------------------------------------------------- | -------- |
-| `datadb.enabled`             | Enable the Data Database.                                   | `true`   |
-| `datadb.image.debug`         | Set the logging level to `trace`. Otherwise, set to `info`. | `false`  |
-| `datadb.rootUser.user`       | The root username.                                          | `root`   |
-| `datadb.rootUser.password`   | The root user password.                                     | `dbrepo` |
-| `datadb.persistence.enabled` | Enable persistent storage. Requires PV-provisioner.         | `false`  |
-| `datadb.replicaCount`        | The number of replicas, should be uneven (2n+1).            | `3`      |
+| Name                              | Description                                                 | Value         |
+| --------------------------------- | ----------------------------------------------------------- | ------------- |
+| `datadb.enabled`                  | Enable the Data Database.                                   | `true`        |
+| `datadb.image.debug`              | Set the logging level to `trace`. Otherwise, set to `info`. | `false`       |
+| `datadb.auth.rootPassword`        | The root user password.                                     | `dbrepo`      |
+| `datadb.auth.replicationUser`     | The database replication username.                          | `replication` |
+| `datadb.auth.replicationPassword` | The database replication user password.                     | `replication` |
 
 ### Search Database
 
-| Name                           | Description                                         | Value       |
-| ------------------------------ | --------------------------------------------------- | ----------- |
-| `searchdb.enabled`             | Enable the Search Database.                         | `true`      |
-| `searchdb.host`                | The hostname for the microservices.                 | `search-db` |
-| `searchdb.port`                | The port for the microservices.                     | `9200`      |
-| `searchdb.username`            | The admin username.                                 | `admin`     |
-| `searchdb.password`            | The admin user password.                            | `admin`     |
-| `searchdb.replicas`            | The number of replicas.                             | `3`         |
-| `searchdb.persistence.enabled` | Enable persistent storage. Requires PV-provisioner. | `false`     |
+| Name                   | Description                         | Value       |
+| ---------------------- | ----------------------------------- | ----------- |
+| `searchdb.enabled`     | Enable the Search Database.         | `true`      |
+| `searchdb.host`        | The hostname for the microservices. | `search-db` |
+| `searchdb.port`        | The port for the microservices.     | `9200`      |
+| `searchdb.clusterName` | The cluster name.                   | `search-db` |
 
 ### Upload Service
 
@@ -126,77 +121,83 @@ The command removes all the Kubernetes components associated with the chart and
 
 ### Broker Service
 
-| Name                                | Description                                                                     | Value                         |
-| ----------------------------------- | ------------------------------------------------------------------------------- | ----------------------------- |
-| `brokerservice.enabled`             | Enable the Broker Service.                                                      | `true`                        |
-| `brokerservice.endpoint`            | The management api endpoint for the microservices.                              | `http://broker-service:15672` |
-| `brokerservice.host`                | The hostname for the microservices.                                             | `broker-service`              |
-| `brokerservice.port`                | The port for the microservices.                                                 | `5672`                        |
-| `brokerservice.virtualHost`         | The default virtual host name.                                                  | `dbrepo`                      |
-| `brokerservice.queueName`           | The default queue name.                                                         | `dbrepo`                      |
-| `brokerservice.exchangeName`        | The default exchange name.                                                      | `dbrepo`                      |
-| `brokerservice.routingKey`          | The default routing key binding from the default queue to the default exchange. | `dbrepo.#`                    |
-| `brokerservice.connectionTimeout`   | The connection timeout in ms.                                                   | `60000`                       |
-| `brokerservice.persistence.enabled` | Enable persistent storage. Requires PV-provisioner.                             | `false`                       |
-| `brokerservice.replicaCount`        | The number of replicas.                                                         | `2`                           |
+| Name                                | Description                                                                                                                                                                                                                    | Value                                                                          |
+| ----------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------ |
+| `brokerservice.enabled`             | Enable the Broker Service.                                                                                                                                                                                                     | `true`                                                                         |
+| `brokerservice.image.debug`         | Set the logging level to `trace`. Otherwise, set to `info`.                                                                                                                                                                    | `true`                                                                         |
+| `brokerservice.endpoint`            | The management api endpoint for the microservices.                                                                                                                                                                             | `http://broker-service:15672`                                                  |
+| `brokerservice.host`                | The hostname for the microservices.                                                                                                                                                                                            | `broker-service`                                                               |
+| `brokerservice.port`                | The port for the microservices.                                                                                                                                                                                                | `5672`                                                                         |
+| `brokerservice.virtualHost`         | The default virtual host name.                                                                                                                                                                                                 | `dbrepo`                                                                       |
+| `brokerservice.queueName`           | The default queue name.                                                                                                                                                                                                        | `dbrepo`                                                                       |
+| `brokerservice.exchangeName`        | The default exchange name.                                                                                                                                                                                                     | `dbrepo`                                                                       |
+| `brokerservice.routingKey`          | The default routing key binding from the default queue to the default exchange.                                                                                                                                                | `dbrepo.#`                                                                     |
+| `brokerservice.connectionTimeout`   | The connection timeout in ms.                                                                                                                                                                                                  | `60000`                                                                        |
+| `brokerservice.auth.username`       | The initial administrator username.                                                                                                                                                                                            | `broker`                                                                       |
+| `brokerservice.auth.password`       | The initial administrator user password.                                                                                                                                                                                       | `broker`                                                                       |
+| `brokerservice.auth.passwordHash`   | The initial administrator user password hash, generated with [`generate-rabbitmq-pw.sh`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/release-1.4.4/helm/dbrepo/hack/generate-rabbitmq-pw.sh). | `1gwjNNTBPKLgyzbsUykfR0JIFC6nNqbNJaxzZ14uPT8JGcTZ`                             |
+| `brokerservice.extraPlugins`        | The list of plugins to be activated.                                                                                                                                                                                           | `rabbitmq_prometheus rabbitmq_auth_backend_oauth2 rabbitmq_auth_mechanism_ssl` |
+| `brokerservice.persistence.enabled` | If set to true, a PVC will be created.                                                                                                                                                                                         | `false`                                                                        |
+| `brokerservice.replicaCount`        | The number of replicas.                                                                                                                                                                                                        | `1`                                                                            |
 
 ### Analyse Service
 
-| Name                          | Description                                           | Value                           |
-| ----------------------------- | ----------------------------------------------------- | ------------------------------- |
-| `analyseservice.enabled`      | Enable the Broker Service.                            | `true`                          |
-| `analyseservice.endpoint`     | The url of the endpoint.                              | `http://analyse-service`        |
-| `analyseservice.s3.endpoint`  | The S3-capable endpoint the microservice connects to. | `http://storageservice-s3:9000` |
-| `analyseservice.replicaCount` | The number of replicas.                               | `2`                             |
+| Name                          | Description                                                 | Value                           |
+| ----------------------------- | ----------------------------------------------------------- | ------------------------------- |
+| `analyseservice.enabled`      | Enable the Analyse Service.                                 | `true`                          |
+| `analyseservice.image.debug`  | Set the logging level to `trace`. Otherwise, set to `info`. | `false`                         |
+| `analyseservice.endpoint`     | The url of the endpoint.                                    | `http://analyse-service`        |
+| `analyseservice.s3.endpoint`  | The S3-capable endpoint the microservice connects to.       | `http://storageservice-s3:9000` |
+| `analyseservice.replicaCount` | The number of replicas.                                     | `2`                             |
 
 ### Metadata Service
 
-| Name                                       | Description                                                           | Value                           |
-| ------------------------------------------ | --------------------------------------------------------------------- | ------------------------------- |
-| `metadataservice.enabled`                  | Enable the Metadata Service.                                          | `true`                          |
-| `metadataservice.endpoint`                 | The Metadata Service endpoint.                                        | `http://metadata-service`       |
-| `metadataservice.admin.email`              | The OAI-PMH exposed admin e-mail.                                     | `noreply@example.com`           |
-| `metadataservice.deletedRecord`            | The OAI-PMH exposed delete policy.                                    | `permanent`                     |
-| `metadataservice.repositoryName`           | The OAI-PMH exposed repository name.                                  | `Database Repository`           |
-| `metadataservice.granularity`              | The OAI-PMH exposed record granularity.                               | `YYYY-MM-DDThh:mm:ssZ`          |
-| `metadataservice.datacite.enabled`         | Enable the DataCite account for minting DOIs.                         | `false`                         |
-| `metadataservice.datacite.url`             | The DataCite api endpoint url.                                        | `https://api.datacite.org`      |
-| `metadataservice.datacite.prefix`          | The DataCite prefix.                                                  | `""`                            |
-| `metadataservice.datacite.username`        | The DataCite api username.                                            | `""`                            |
-| `metadataservice.datacite.password`        | The DataCite api user password.                                       | `""`                            |
-| `metadataservice.sparql.connectionTimeout` | The connection timeout for sparql queries fetching remote data in ms. | `10000`                         |
-| `metadataservice.s3.endpoint`              | The S3-capable endpoint the microservice connects to.                 | `http://storageservice-s3:9000` |
-| `metadataservice.s3.auth.username`         | The S3-capable endpoint username (or access key id).                  | `seaweedfsadmin`                |
-| `metadataservice.s3.auth.password`         | The S3-capable endpoint user password (or access key secret).         | `seaweedfsadmin`                |
-| `metadataservice.replicaCount`             | The number of replicas.                                               | `2`                             |
+| Name                                       | Description                                                                        | Value                           |
+| ------------------------------------------ | ---------------------------------------------------------------------------------- | ------------------------------- |
+| `metadataservice.enabled`                  | Enable the Metadata Service.                                                       | `true`                          |
+| `metadataservice.image.debug`              | Set the logging level to `trace`. Otherwise, set to `info`.                        | `false`                         |
+| `metadataservice.endpoint`                 | The Metadata Service endpoint.                                                     | `http://metadata-service`       |
+| `metadataservice.admin.email`              | The OAI-PMH exposed e-mail of the person responsible for the metadata records.     | `noreply@example.com`           |
+| `metadataservice.deletedRecord`            | The OAI-PMH exposed delete policy.                                                 | `permanent`                     |
+| `metadataservice.repositoryName`           | The OAI-PMH exposed repository name.                                               | `Database Repository`           |
+| `metadataservice.granularity`              | The OAI-PMH exposed record granularity.                                            | `YYYY-MM-DDThh:mm:ssZ`          |
+| `metadataservice.datacite.enabled`         | If set to true, the service mints DOIs instead of local PIDs.                      | `false`                         |
+| `metadataservice.datacite.url`             | The DataCite api endpoint url.                                                     | `https://api.datacite.org`      |
+| `metadataservice.datacite.prefix`          | The DataCite prefix.                                                               | `""`                            |
+| `metadataservice.datacite.username`        | The DataCite api username.                                                         | `""`                            |
+| `metadataservice.datacite.password`        | The DataCite api user password.                                                    | `""`                            |
+| `metadataservice.sparql.connectionTimeout` | The connection timeout for sparql queries fetching remote data in ms.              | `10000`                         |
+| `metadataservice.s3.endpoint`              | The S3-capable endpoint the microservice connects to.                              | `http://storageservice-s3:9000` |
+| `metadataservice.s3.auth.username`         | The S3-capable endpoint username (or access key id).                               | `seaweedfsadmin`                |
+| `metadataservice.s3.auth.password`         | The S3-capable endpoint user password (or access key secret).                      | `seaweedfsadmin`                |
+| `metadataservice.replicaCount`             | The number of replicas.                                                            | `2`                             |
 
 ### Data Service
 
-| Name                                | Description                                                              | Value                                                                                                                       |
-| ----------------------------------- | ------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------- |
-| `dataservice.enabled`               | Enable the Metadata Service.                                             | `true`                                                                                                                      |
-| `dataservice.endpoint`              | The endpoint for the microservices.                                      | `http://data-service`                                                                                                       |
-| `dataservice.grant.read`            | The default database permissions for users with read access.             | `SELECT`                                                                                                                    |
-| `dataservice.grant.write`           | The default database permissions for users with write access.            | `SELECT, CREATE, CREATE VIEW, CREATE ROUTINE, CREATE TEMPORARY TABLES, LOCK TABLES, INDEX, TRIGGER, INSERT, UPDATE, DELETE` |
-| `dataservice.default.date`          | The default date format id for dates.                                    | `3`                                                                                                                         |
-| `dataservice.default.time`          | The default date format id for times.                                    | `4`                                                                                                                         |
-| `dataservice.default.timestamp`     | The default date format id for timestamps.                               | `1`                                                                                                                         |
-| `dataservice.s3.endpoint`           | The S3-capable endpoint the microservice connects to.                    | `http://storageservice-s3:9000`                                                                                             |
-| `dataservice.s3.auth.username`      | The S3-capable endpoint username (or access key id).                     | `seaweedfsadmin`                                                                                                            |
-| `dataservice.s3.auth.password`      | The S3-capable endpoint user password (or access key secret).            | `seaweedfsadmin`                                                                                                            |
-| `dataservice.s3.filePath`           | The local location to download/upload files from/to S3-capable endpoint. | `/s3`                                                                                                                       |
-| `dataservice.consumerConcurrentMin` | The minimum broker service consumer number.                              | `1`                                                                                                                         |
-| `dataservice.consumerConcurrentMax` | The maximum broker service consumer number.                              | `5`                                                                                                                         |
-| `dataservice.requeueRejected`       | Enable re-queueing of rejected messages to the broker service.           | `false`                                                                                                                     |
-| `dataservice.replicaCount`          | The number of replicas.                                                  | `2`                                                                                                                         |
+| Name                            | Description                                                                                         | Value                                                                                                                       |
+| ------------------------------- | --------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| `dataservice.enabled`           | Enable the Data Service.                                                                            | `true`                                                                                                                      |
+| `dataservice.endpoint`          | The endpoint for the microservices.                                                                 | `http://data-service`                                                                                                       |
+| `dataservice.image.debug`       | Set the logging level to `trace`. Otherwise, set to `info`.                                         | `false`                                                                                                                     |
+| `dataservice.grant.read`        | The default database permissions for users with read access.                                        | `SELECT`                                                                                                                    |
+| `dataservice.grant.write`       | The default database permissions for users with write access.                                       | `SELECT, CREATE, CREATE VIEW, CREATE ROUTINE, CREATE TEMPORARY TABLES, LOCK TABLES, INDEX, TRIGGER, INSERT, UPDATE, DELETE` |
+| `dataservice.default.date`      | The default date format id for dates. Default: YYYY-MM-dd (e.g. 2024-06-15).                        | `3`                                                                                                                         |
+| `dataservice.default.time`      | The default date format id for times. Default: HH:mm:ss (e.g. 14:23:42).                            | `4`                                                                                                                         |
+| `dataservice.default.timestamp` | The default date format id for timestamps. Default: YYYY-MM-dd HH:mm:ss (e.g. 2024-06-15 14:23:42). | `1`                                                                                                                         |
+| `dataservice.s3.endpoint`       | The S3-capable endpoint the microservice connects to.                                               | `http://storageservice-s3:9000`                                                                                             |
+| `dataservice.s3.auth.username`  | The S3-capable endpoint username (or access key id).                                                | `seaweedfsadmin`                                                                                                            |
+| `dataservice.s3.auth.password`  | The S3-capable endpoint user password (or access key secret).                                       | `seaweedfsadmin`                                                                                                            |
+| `dataservice.s3.filePath`       | The local location to download/upload files from/to S3-capable endpoint.                            | `/s3`                                                                                                                       |
+| `dataservice.replicaCount`      | The number of replicas.                                                                             | `2`                                                                                                                         |
 
 ### Search Service
 
-| Name                         | Description                         | Value                   |
-| ---------------------------- | ----------------------------------- | ----------------------- |
-| `searchservice.enabled`      | Enable the Search Service.          | `true`                  |
-| `searchservice.endpoint`     | The endpoint for the microservices. | `http://search-service` |
-| `searchservice.replicaCount` | The number of replicas.             | `2`                     |
+| Name                         | Description                                                 | Value                   |
+| ---------------------------- | ----------------------------------------------------------- | ----------------------- |
+| `searchservice.enabled`      | Enable the Search Service.                                  | `true`                  |
+| `searchservice.endpoint`     | The endpoint for the microservices.                         | `http://search-service` |
+| `searchservice.image.debug`  | Set the logging level to `trace`. Otherwise, set to `info`. | `false`                 |
+| `searchservice.replicaCount` | The number of replicas.                                     | `2`                     |
 
 ### Storage Service
 
@@ -209,6 +210,7 @@ The command removes all the Kubernetes components associated with the chart and
 | Name                              | Description                                                                  | Value                   |
 | --------------------------------- | ---------------------------------------------------------------------------- | ----------------------- |
 | `ui.enabled`                      | Enable the User Interface.                                                   | `true`                  |
+| `ui.image.debug`                  | Set the logging level to `trace`. Otherwise, set to `info`.                  | `false`                 |
 | `ui.public.api.client`            | The endpoint for the client api.                                             | `""`                    |
 | `ui.public.api.server`            | The endpoint for the server api.                                             | `""`                    |
 | `ui.public.title`                 | The user interface title.                                                    | `Database Repository`   |
@@ -227,6 +229,9 @@ The command removes all the Kubernetes components associated with the chart and
 
 ### Ingress
 
-| Name              | Description         | Value   |
-| ----------------- | ------------------- | ------- |
-| `ingress.enabled` | Enable the ingress. | `false` |
+| Name                     | Description                                                                                                     | Value          |
+| ------------------------ | --------------------------------------------------------------------------------------------------------------- | -------------- |
+| `ingress.enabled`        | Enable the ingress.                                                                                             | `false`        |
+| `ingress.className`      | The ingress class name.                                                                                         | `nginx`        |
+| `ingress.tls.enabled`    | Enable TLS/SSL for the ingress.                                                                                  | `true`         |
+| `ingress.tls.secretName` | The secret holding the SSL/TLS certificate. Needs to have keys `tls.crt` and `tls.key` and optionally `ca.crt`. | `ingress-cert` |
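
The `ingress.tls.secretName` default expects a pre-existing secret with the listed keys; a minimal sketch for creating one (the certificate file paths are assumptions):

```bash
# Create the secret the ingress consumes; tls.crt and tls.key are required, ca.crt is optional.
kubectl create secret generic ingress-cert \
  --from-file=tls.crt=./server.crt \
  --from-file=tls.key=./server.key \
  --from-file=ca.crt=./ca.crt
```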
diff --git a/helm/dbrepo/charts/mariadb-14.1.4.tgz b/helm/dbrepo/charts/mariadb-14.1.4.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..83f470bdcade4fdfc13b0d1f4f46095b877e3bcd
Binary files /dev/null and b/helm/dbrepo/charts/mariadb-14.1.4.tgz differ
diff --git a/helm/dbrepo/charts/mariadb-galera-11.0.1.tgz b/helm/dbrepo/charts/mariadb-galera-11.0.1.tgz
deleted file mode 100644
index 75966763de12ffca164d475cccac327a338857df..0000000000000000000000000000000000000000
Binary files a/helm/dbrepo/charts/mariadb-galera-11.0.1.tgz and /dev/null differ
diff --git a/helm/dbrepo/charts/opensearch-1.2.2.tgz b/helm/dbrepo/charts/opensearch-1.2.2.tgz
new file mode 100644
index 0000000000000000000000000000000000000000..0393bfc1aa2fa964c68e66af6da6f356ea84e29f
Binary files /dev/null and b/helm/dbrepo/charts/opensearch-1.2.2.tgz differ
diff --git a/helm/dbrepo/charts/opensearch-2.15.0.tgz b/helm/dbrepo/charts/opensearch-2.15.0.tgz
deleted file mode 100644
index 7d2f6efb43a2d44e8dfffde4e0265d302af2b2a6..0000000000000000000000000000000000000000
Binary files a/helm/dbrepo/charts/opensearch-2.15.0.tgz and /dev/null differ
diff --git a/helm/dbrepo/hack/generate-rabbitmq-pw.sh b/helm/dbrepo/hack/generate-rabbitmq-pw.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4bccc93d7b0b9dc8a9e8b4116bcd6826f83f3f71
--- /dev/null
+++ b/helm/dbrepo/hack/generate-rabbitmq-pw.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+# https://stackoverflow.com/a/53175209/2634294
+# THIS SCRIPT REQUIRES xxd AND sha256sum TO BE INSTALLED:
+#     DEBIAN: apt install xxd
+#     MACOS: brew install coreutils (provides sha256sum)
+function encode_password()
+{
+    SALT=$(od -A n -t x -N 4 /dev/urandom)
+    PASS=$SALT$(echo -n "$1" | xxd -ps | tr -d '\n' | tr -d ' ')
+    PASS=$(echo -n "$PASS" | xxd -r -p | sha256sum | head -c 128)
+    PASS=$(echo -n "$SALT$PASS" | xxd -r -p | base64 | tr -d '\n')
+    echo "$PASS"
+}
+encode_password "$1"
\ No newline at end of file
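
The script takes the plaintext password as its only argument and prints the salted hash that `brokerservice.auth.passwordHash` expects. A usage sketch (the release name `dbrepo` and the chart path are assumptions):

```bash
# Hash the broker password and hand both plaintext and hash to the chart.
HASH=$(./helm/dbrepo/hack/generate-rabbitmq-pw.sh 'broker')
helm upgrade --install dbrepo ./helm/dbrepo \
  --set brokerservice.auth.password='broker' \
  --set brokerservice.auth.passwordHash="$HASH"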
diff --git a/helm/dbrepo/templates/broker-secret.yaml b/helm/dbrepo/templates/broker-secret.yaml
index 9291cdbead49275baa472b9aecd9f7a83dc407d2..84a3088663e0eae7f937781f453fafa873437d10 100644
--- a/helm/dbrepo/templates/broker-secret.yaml
+++ b/helm/dbrepo/templates/broker-secret.yaml
@@ -34,7 +34,7 @@ stringData:
         {
           "configure": ".*",
           "read": ".*",
-          "user": "broker",
+          "user": "{{ .Values.brokerservice.auth.username }}",
           "vhost": "dbrepo",
           "write": ".*"
         }
@@ -60,8 +60,8 @@ stringData:
         {
           "hashing_algorithm": "rabbit_password_hashing_sha256",
           "limits": {},
-          "name": "broker",
-          "password_hash": "Sek6WxpX2L6UhxlwRkD0cnYAH5GbtTcCFq1yY/SCc1mAa0gB",
+          "name": "{{ .Values.brokerservice.auth.username }}",
+          "password_hash": "{{ .Values.brokerservice.auth.passwordHash }}",
           "tags": [
             "administrator"
           ]
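
To confirm the templated user entry renders as intended without deploying, the secret can be rendered offline; a sketch assuming the chart is rendered from the repository root:

```bash
# Render only the broker secret and inspect the templated user/password_hash fields.
helm template dbrepo ./helm/dbrepo \
  --show-only templates/broker-secret.yaml \
  --set brokerservice.auth.username=broker | grep -E '"(user|name|password_hash)"'
```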
diff --git a/helm/dbrepo/templates/metadata-configmap.yaml b/helm/dbrepo/templates/metadata-configmap.yaml
index 4bb2eb136b557c0a44f3a9fb77d8d6a023ab67ea..7965f0a3855c991291a30dcef43294226971aeeb 100644
--- a/helm/dbrepo/templates/metadata-configmap.yaml
+++ b/helm/dbrepo/templates/metadata-configmap.yaml
@@ -12,7 +12,7 @@ data:
   02-setup-data.sql: |
     BEGIN;
     INSERT INTO `mdb_containers` (name, internal_name, image_id, host, port, sidecar_host, sidecar_port, privileged_username, privileged_password)
-      VALUES ('MariaDB Galera 11.1.3', 'mariadb_11_1_3', 1, 'data-db', 3306, 'data-db', 80, 'root', 'dbrepo');
+      VALUES ('MariaDB 11.1.3', 'mariadb_11_1_3', 1, 'data-db', 3306, 'data-db', 80, 'root', 'dbrepo');
     COMMIT;
   01-setup-schema.sql: |
     BEGIN;
diff --git a/helm/dbrepo/templates/metadata-secret.yaml b/helm/dbrepo/templates/metadata-secret.yaml
index 3beda17fc57fe12ecb06021e3b81fe47e6a067f0..fe48d381ea4495ff1bd4b0c927190ef53275919c 100644
--- a/helm/dbrepo/templates/metadata-secret.yaml
+++ b/helm/dbrepo/templates/metadata-secret.yaml
@@ -15,7 +15,7 @@ stringData:
   AUTH_SERVICE_CLIENT: "{{ .Values.authservice.client.id }}"
   AUTH_SERVICE_CLIENT_SECRET: "{{ .Values.authservice.client.secret }}"
   AUTH_SERVICE_ENDPOINT: "{{ .Values.authservice.endpoint }}"
-  BASE_URL: "{{ .Values.hostname }}"
+  BASE_URL: "{{ .Values.gateway }}"
   BROKER_EXCHANGE_NAME: "{{ .Values.brokerservice.exchangeName }}"
   BROKER_HOST: "{{ .Values.brokerservice.host }}"
   BROKER_QUEUE_NAME: "{{ .Values.brokerservice.queueName }}"
@@ -33,11 +33,11 @@ stringData:
   GRANULARITY: "{{ .Values.metadataservice.granularity }}"
   JWT_PUBKEY: "{{ .Values.authservice.jwt.pubkey }}"
   LOG_LEVEL: "{{ ternary "trace" "info" .Values.metadataservice.image.debug }}"
-  METADATA_DB: "{{ .Values.metadatadb.db.name }}"
+  METADATA_DB: "{{ .Values.metadatadb.auth.database }}"
   METADATA_HOST: "{{ .Values.metadatadb.host }}"
   METADATA_JDBC_EXTRA_ARGS: "{{ .Values.metadatadb.jdbcExtraArgs }}"
-  METADATA_USERNAME: "{{ .Values.metadatadb.rootUser.user }}"
-  METADATA_PASSWORD: "{{ .Values.metadatadb.rootUser.password }}"
+  METADATA_USERNAME: "{{ .Values.metadatadb.auth.root }}"
+  METADATA_PASSWORD: "{{ .Values.metadatadb.auth.rootPassword }}"
   PID_BASE: "{{ $pidBase }}"
   REPOSITORY_NAME: "{{ .Values.metadataservice.repositoryName }}"
   SEARCH_SERVICE_ENDPOINT: "{{ .Values.searchservice.endpoint }}"
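
Since the metadata credentials moved from `metadatadb.rootUser.*` to `metadatadb.auth.*`, overrides need the new keys; a quick render check (release name and chart path assumed):

```bash
# Verify the renamed values land in the secret's METADATA_* entries.
helm template dbrepo ./helm/dbrepo \
  --show-only templates/metadata-secret.yaml \
  --set metadatadb.auth.root=root \
  --set metadatadb.auth.rootPassword=s3cret \
  | grep -E 'METADATA_(DB|USERNAME|PASSWORD)'
```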
diff --git a/helm/dbrepo/values.yaml b/helm/dbrepo/values.yaml
index 15e6888d17c4df8e39de737cdd2ef6a946d3df1e..39d8517cfb4be46707f021274c3dfbdb12c45842 100644
--- a/helm/dbrepo/values.yaml
+++ b/helm/dbrepo/values.yaml
@@ -22,380 +22,282 @@ clusterDomain: cluster.local
 
 ## @section Internal Admin User
 
-## @param admin.username The internal admin username.
-## @param admin.password The internal admin password.
-##
 admin:
+  ## @param admin.username The internal admin username.
   username: admin
+  ## @param admin.password The internal admin password.
   password: admin
 
 ## @section Metadata Database
 
-## @param metadatadb.enabled Enable the Metadata Database.
-## @skip metadatadb.fullnameOverride
-## @param metadatadb.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
-## @param metadatadb.host The hostname for the microservices.
-## @param metadatadb.rootUser.user The root username.
-## @param metadatadb.rootUser.password The root user password.
-## @param metadatadb.jdbcExtraArgs The extra arguments for JDBC connections in the microservices.
-## @param metadatadb.db.name The database name.
-## @skip metadatadb.metrics.enabled The Prometheus settings.
-## @skip metadatadb.galera The Galera settings.
-## @skip metadatadb.initdbScriptsConfigMap The initial database scripts.
-## @param metadatadb.extraInitDbScripts Additional init.db scripts that are executed on the first start.
-## @skip metadatadb.service The initial database scripts.
-## @param metadatadb.persistence.enabled Enable persistent storage. Requires PV-provisioner.
-## @param metadatadb.replicaCount The number of replicas, should be uneven (2n+1).
-##
 metadatadb:
+  ## @param metadatadb.enabled Enable the Metadata Database.
   enabled: true
+  ## @skip metadatadb.fullnameOverride
   fullnameOverride: metadata-db
-  image:
-    debug: false
+  ## @param metadatadb.host The hostname for the microservices.
   host: metadata-db
-  rootUser:
-    user: root
-    password: dbrepo
+  auth:
+    ## @param metadatadb.auth.root The root username.
+    root: root
+    ## @param metadatadb.auth.rootPassword The root user password.
+    rootPassword: dbrepo
+    ## @param metadatadb.auth.database The database name.
+    database: dbrepo
+    ## @param metadatadb.auth.replicationUser The database replication username.
+    replicationUser: replication
+    ## @param metadatadb.auth.replicationPassword The database replication user password.
+    replicationPassword: replication
+  ## @param metadatadb.jdbcExtraArgs The extra arguments for JDBC connections in the microservices.
   jdbcExtraArgs: ""
-  db:
-    name: fda
   metrics:
+    ## @skip metadatadb.metrics.enabled The Prometheus settings.
     enabled: false
-  galera:
-    mariabackup:
-      user: mariabackup
-      password: mariabackup
+  ## @skip metadatadb.initdbScriptsConfigMap The initial database scripts.
   initdbScriptsConfigMap: metadata-db-setup
+  ## @param metadatadb.extraInitDbScripts Additional initdb scripts that are executed on the first start.
   extraInitDbScripts: { }
   #    03-additional-data.sql: |
   #      BEGIN;
   #      INSERT INTO `mdb_containers` (name, internal_name, image_id, host, port, sidecar_host, sidecar_port, privileged_username, privileged_password)
   #        VALUES ('MariaDB Galera TEST', 'mariadb_11_1_3', 1, 'data-db', 3306, 'data-db', 80, 'root', 'dbrepo');
   #      COMMIT;
-  service:
-    type: ClusterIP
-    annotations: { }
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: [ ]
-  persistence:
-    enabled: false
-  replicaCount: 3
+  secondary:
+    ## @param metadatadb.secondary.replicaCount The number of replicas of the secondary database pods.
+    replicaCount: 2
 
 ## @section Auth Service
 
-## @param authservice.enabled Enable the Auth Service.
-## @skip authservice.fullnameOverride
-## @param authservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
-## @param authservice.endpoint The hostname for the microservices.
-## @param authservice.auth.adminUser The admin username.
-## @param authservice.auth.adminPassword The admin user password.
-## @skip authservice.postgresql
-## @skip authservice.extraStartupArgs
-## @param authservice.jwt.pubkey The JWT public key from the `dbrepo-client`.
-## @param authservice.tls.enabled Enable TLS/SSL communication. Required for HTTPS.
-## @param authservice.tls.existingSecret The secret containing the `tls.crt`, `tls.key` and `ca.crt`.
-## @param authservice.tls.usePem Use PEM certificates as input instead of PKS12/JKS stores.
-## @param authservice.metrics.enabled Enable the Prometheus metrics export sidecar container.
-## @param authservice.client.id The client id for the microservices.
-## @param authservice.client.secret The client secret for the microservices.
-## @skip authservice.extraEnvVarsCM
-## @skip authservice.extraVolumes
-## @skip authservice.extraVolumeMounts
-## @skip authservice.replicaCount The number of replicas.
-##
 authservice:
+  ## @param authservice.enabled Enable the Auth Service.
   enabled: true
+  ## @skip authservice.fullnameOverride
   fullnameOverride: auth-service
   image:
+    ## @param authservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
+  ## @param authservice.endpoint The hostname for the microservices.
   endpoint: http://auth-service
   auth:
+    ## @param authservice.auth.adminUser The admin username.
     adminUser: fda
+    ## @param authservice.auth.adminPassword The admin user password.
     adminPassword: fda
+  ## @skip authservice.postgresql
   postgresql:
     enabled: true
     auth:
       postgresPassword: postgres
+  ## @skip authservice.extraStartupArgs
   extraStartupArgs: "--import-realm"
   jwt:
+    ## @param authservice.jwt.pubkey The JWT public key from the `dbrepo-client`.
     pubkey: "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqqnHQ2BWWW9vDNLRCcxD++xZg/16oqMo/c1l+lcFEjjAIJjJp/HqrPYU/U9GvquGE6PbVFtTzW1KcKawOW+FJNOA3CGo8Q1TFEfz43B8rZpKsFbJKvQGVv1Z4HaKPvLUm7iMm8Hv91cLduuoWx6Q3DPe2vg13GKKEZe7UFghF+0T9u8EKzA/XqQ0OiICmsmYPbwvf9N3bCKsB/Y10EYmZRb8IhCoV9mmO5TxgWgiuNeCTtNCv2ePYqL/U0WvyGFW0reasIK8eg3KrAUj8DpyOgPOVBn3lBGf+3KFSYi+0bwZbJZWqbC/Xlk20Go1YfeJPRIt7ImxD27R/lNjgDO/MwIDAQAB"
   tls:
+    ## @param authservice.tls.enabled Enable TLS/SSL communication. Required for HTTPS.
     enabled: true
+    ## @param authservice.tls.existingSecret The secret containing the `tls.crt`, `tls.key` and `ca.crt`.
     existingSecret: ingress-cert
+    ## @skip authservice.tls.usePem
     usePem: true
   metrics:
+    ## @param authservice.metrics.enabled Enable the Prometheus metrics export sidecar container.
     enabled: false
   client:
+    ## @param authservice.client.id The client id for the microservices.
     id: dbrepo-client
+    ## @param authservice.client.secret The client secret for the microservices.
     secret: MUwRc7yfXSJwX8AdRMWaQC3Nep1VjwgG
+  ## @skip authservice.extraEnvVarsCM
   extraEnvVarsCM: auth-service-config
+  ## @skip authservice.extraVolumes
   extraVolumes:
     - name: config-map
       configMap:
         name: auth-service-config
+  ## @skip authservice.extraVolumeMounts
   extraVolumeMounts:
     - name: config-map
       mountPath: /opt/bitnami/keycloak/data/import
+  ## @skip authservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Data Database
 
-## @param datadb.enabled Enable the Data Database.
-## @skip datadb.fullnameOverride
-## @param datadb.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
-## @skip datadb.extraFlags
-## @param datadb.rootUser.user The root username.
-## @param datadb.rootUser.password The root user password.
-## @skip datadb.metrics.enabled The Prometheus settings.
-## @skip datadb.galera The Galera settings.
-## @skip datadb.service
-## @skip datadb.sidecars
-## @skip datadb.extraVolumeMounts
-## @skip datadb.extraVolumes
-## @param datadb.persistence.enabled Enable persistent storage. Requires PV-provisioner.
-## @param datadb.replicaCount The number of replicas, should be uneven (2n+1).
-##
 datadb:
+  ## @param datadb.enabled Enable the Data Database.
   enabled: true
+  ## @skip datadb.fullnameOverride
   fullnameOverride: data-db
   image:
+    ## @param datadb.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
+  ## @skip datadb.extraFlags
   extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_general_ci"
-  rootUser:
-    user: root
-    password: dbrepo
+  auth:
+    ## @param datadb.auth.rootPassword The root user password.
+    rootPassword: dbrepo
+    ## @param datadb.auth.replicationUser The database replication username.
+    replicationUser: replication
+    ## @param datadb.auth.replicationPassword The database replication user password
+    replicationPassword: replication
   metrics:
+    ## @skip datadb.metrics.enabled
     enabled: true
-  galera:
-    mariabackup:
-      user: mariabackup
-      password: mariabackup
-  service:
-    extraPorts:
-      - name: "sidecar"
-        port: 8080
-        targetPort: 8080
-        protocol: TCP
-  sidecars:
-    - name: sidecar
-      image: s210.dl.hpc.tuwien.ac.at/dbrepo/data-db-sidecar:1.4.4
-      imagePullPolicy: Always
-      securityContext:
-        runAsUser: 1001
-        runAsGroup: 0
-        runAsNonRoot: true
-        allowPrivilegeEscalation: false
-        seccompProfile:
-          type: RuntimeDefault
-        capabilities:
-          drop:
-            - ALL
-      ports:
+  ## @skip datadb.primary
+  primary:
+    service:
+      extraPorts:
         - name: "sidecar"
-          containerPort: 8080
+          port: 8080
+          targetPort: 8080
           protocol: TCP
-      envFrom:
-        - secretRef:
-            name: data-service-secret
-      livenessProbe:
-        exec:
-          command:
-            - /bin/bash
-            - -ec
-            - "curl -sSL localhost:8080/health | grep 'UP' || exit 1"
-        initialDelaySeconds: 120
-        periodSeconds: 30
-      readinessProbe:
-        exec:
-          command:
-            - /bin/bash
-            - -ec
-            - "curl -sSL localhost:8080/health | grep 'UP' || exit 1"
-        initialDelaySeconds: 30
-        periodSeconds: 30
-      volumeMounts:
-        - name: s3
-          mountPath: /s3
-  extraVolumeMounts:
-    - name: s3
-      mountPath: /s3
-  extraVolumes:
-    - name: s3
-      emptyDir: { }
-  persistence:
-    enabled: false
-  replicaCount: 3
+    sidecars:
+      - name: sidecar
+        image: registry.datalab.tuwien.ac.at/dbrepo/data-db-sidecar:1.4.4
+        imagePullPolicy: Always
+        securityContext:
+          runAsUser: 1001
+          runAsGroup: 0
+          runAsNonRoot: true
+          allowPrivilegeEscalation: false
+          seccompProfile:
+            type: RuntimeDefault
+          capabilities:
+            drop:
+              - ALL
+        ports:
+          - name: "sidecar"
+            containerPort: 8080
+            protocol: TCP
+        envFrom:
+          - secretRef:
+              name: data-service-secret
+        livenessProbe:
+          exec:
+            command:
+              - /bin/bash
+              - -ec
+              - "curl -sSL localhost:8080/health | grep 'UP' || exit 1"
+          initialDelaySeconds: 120
+          periodSeconds: 30
+        readinessProbe:
+          exec:
+            command:
+              - /bin/bash
+              - -ec
+              - "curl -sSL localhost:8080/health | grep 'UP' || exit 1"
+          initialDelaySeconds: 30
+          periodSeconds: 30
+        volumeMounts:
+          - name: s3
+            mountPath: /s3
+    extraVolumeMounts:
+      - name: s3
+        mountPath: /s3
+    extraVolumes:
+      - name: s3
+        emptyDir: { }
+    persistence:
+      enabled: false
+  ## @skip datadb.secondary
+  secondary:
+    replicaCount: 2
 
 ## @section Search Database
 
-## @param searchdb.enabled Enable the Search Database.
-## @skip searchdb.fullnameOverride
-## @param searchdb.host The hostname for the microservices.
-## @param searchdb.port The port for the microservices.
-## @skip searchdb.protocol
-## @param searchdb.username The admin username.
-## @param searchdb.password The admin user password.
-## @skip searchdb.clusterName
-## @skip searchdb.masterService
-## @param searchdb.replicas The number of replicas.
-## @skip searchdb.sysctlInit
-## @param searchdb.persistence.enabled Enable persistent storage. Requires PV-provisioner.
-## @skip searchdb.service
-## @skip searchdb.extraEnvs
-## @skip searchdb.extraVolumeMounts
-## @skip searchdb.extraVolumes
-## @skip searchdb.config
-##
 searchdb:
-  enabled: true
+  ## @param searchdb.enabled Enable the Search Database.
+  enabled: true
+  ## @skip searchdb.fullnameOverride
   fullnameOverride: search-db
+  ## @skip searchdb.servicenameOverride
+  servicenameOverride: search-db
+  ## @param searchdb.host The hostname for the microservices.
   host: search-db
+  ## @param searchdb.port The port for the microservices.
   port: 9200
-  protocol: http
-  username: admin
-  password: admin
-  clusterName: search-db
-  masterService: search-db
-  replicas: 3
-  sysctlInit:
-    enabled: true
-  persistence:
+  ## @skip searchdb.security
+  security:
     enabled: false
-  service:
-    type: ClusterIP
-    annotations: { }
-    loadBalancerSourceRanges: [ ]
-  extraEnvs:
-    - name: DISABLE_INSTALL_DEMO_CONFIG
-      value: "true"
-  extraVolumeMounts:
-    - name: node-cert
-      mountPath: /usr/share/opensearch/config/tls
-      readOnly: true
-  extraVolumes:
-    - name: node-cert
-      secret:
-        secretName: search-db-secret
-  config:
-    opensearch.yml: |
-      cluster.name: search-db
-      network.host: 0.0.0.0
-      plugins:
-        security:
-          ssl:
-            transport:
-              pemcert_filepath: tls/tls.crt
-              pemkey_filepath: tls/tls.key
-              pemtrustedcas_filepath: tls/ca.crt
-              enforce_hostname_verification: false
-            http:
-              #enabled: true # uncomment to force ssl connections
-              pemcert_filepath: tls/tls.crt
-              pemkey_filepath: tls/tls.key
-              pemtrustedcas_filepath: tls/ca.crt
-          allow_unsafe_democertificates: false
-          allow_default_init_securityindex: true
-          authcz:
-            admin_dn:
-              - CN=search-db
-          nodes_dn:
-            - CN=search-db
-          audit.type: internal_opensearch
-          enable_snapshot_restore_privilege: true
-          check_snapshot_restore_write_privileges: true
-          restapi:
-            roles_enabled: [ "all_access", "security_rest_api_access" ]
-          system_indices:
-            enabled: true
-            indices:
-              [
-                ".opendistro-alerting-config",
-                ".opendistro-alerting-alert*",
-                ".opendistro-anomaly-results*",
-                ".opendistro-anomaly-detector*",
-                ".opendistro-anomaly-checkpoints",
-                ".opendistro-anomaly-detection-state",
-                ".opendistro-reports-*",
-                ".opendistro-notifications-*",
-                ".opendistro-notebooks",
-                ".opendistro-asynchronous-search-response*",
-              ]
+    adminPassword: admin
+  ## @param searchdb.clusterName The cluster name.
+  clusterName: search-db
 
 ## @section Upload Service
 
-## @param uploadservice.enabled Enable the Upload Service.
-## @skip uploadservice.fullnameOverride
-## @skip uploadservice.image
-## @skip uploadservice.containerArgs
-## @skip uploadservice.envFrom
-## @param uploadservice.replicaCount The number of replicas.
-##
 uploadservice:
+  ## @param uploadservice.enabled Enable the Upload Service.
   enabled: true
+  ## @skip uploadservice.fullnameOverride
   fullnameOverride: upload-service
+  ## @skip uploadservice.image
   image:
     repository: tusproject/tusd
     tag: v1.12
+  ## @skip uploadservice.securityContext
   securityContext:
     allowPrivilegeEscalation: false
+    runAsUser: 1000
+    runAsGroup: 1000
     runAsNonRoot: true
     seccompProfile:
       type: RuntimeDefault
     capabilities:
       drop:
         - ALL
+  ## @skip uploadservice.containerArgs
   containerArgs:
     - "--base-path=/api/upload/files/"
     - "-s3-endpoint=http://storageservice-s3:9000"
     - "-s3-bucket=dbrepo-upload"
+  ## @skip uploadservice.envFrom
   envFrom:
     - secretRef:
         name: upload-service-secret
+  ## @param uploadservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Broker Service
 
-## @param brokerservice.enabled Enable the Broker Service.
-## @skip brokerservice.fullnameOverride
-## @skip brokerservice.image
-## @param brokerservice.endpoint The management api endpoint for the microservices.
-## @param brokerservice.host The hostname for the microservices.
-## @param brokerservice.port The port for the microservices.
-## @param brokerservice.virtualHost The default virtual host name.
-## @param brokerservice.queueName The default queue name.
-## @param brokerservice.exchangeName The default exchange name.
-## @param brokerservice.routingKey The default routing key binding from the default queue to the default exchange.
-## @param brokerservice.connectionTimeout The connection timeout in ms.
-## @skip brokerservice.auth
-## @skip brokerservice.extraConfiguration
-## @skip brokerservice.loadDefinition
-## @skip brokerservice.extraVolumes
-## @skip brokerservice.extraPlugins
-## @param brokerservice.persistence.enabled Enable persistent storage. Requires PV-provisioner.
-## @skip brokerservice.service
-## @param brokerservice.replicaCount The number of replicas.
-##
 brokerservice:
+  ## @param brokerservice.enabled Enable the Broker Service.
   enabled: true
+  ## @skip brokerservice.fullnameOverride
   fullnameOverride: broker-service
   image:
+    ## @param brokerservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: true
+  ## @param brokerservice.endpoint The management api endpoint for the microservices.
   endpoint: http://broker-service:15672
+  ## @param brokerservice.host The hostname for the microservices.
   host: broker-service
+  ## @param brokerservice.port The port for the microservices.
   port: 5672
+  ## @param brokerservice.virtualHost The default virtual host name.
   virtualHost: dbrepo
+  ## @param brokerservice.queueName The default queue name.
   queueName: dbrepo
+  ## @param brokerservice.exchangeName The default exchange name.
   exchangeName: dbrepo
+  ## @param brokerservice.routingKey The default routing key binding from the default queue to the default exchange.
   routingKey: dbrepo.#
+  ## @param brokerservice.connectionTimeout The connection timeout in ms.
   connectionTimeout: 60000
   auth:
+    ## @skip brokerservice.auth.tls
     tls:
       enabled: false
       sslOptionsVerify: true
       failIfNoPeerCert: true
       existingSecret: ingress-cert
+    ## @param brokerservice.auth.username The initial administrator username.
     username: broker
+    ## @param brokerservice.auth.password The initial administrator user password.
     password: broker
+    ## @param brokerservice.auth.passwordHash The initial administrator user password hash, generated with [`generate-rabbitmq-pw.sh`](https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/blob/release-1.4.4/helm/dbrepo/hack/generate-rabbitmq-pw.sh).
+    passwordHash: 1gwjNNTBPKLgyzbsUykfR0JIFC6nNqbNJaxzZ14uPT8JGcTZ
+  ## @skip brokerservice.extraConfiguration
   extraConfiguration: |-
     default_vhost = dbrepo
     default_user_tags.administrator = true
@@ -419,174 +321,174 @@ brokerservice:
     management.oauth_client_secret = JEC2FexxrX4N65fLeDGukAl6R3Lc9y0u
     management.oauth_scopes = openid
     management.oauth_provider_url = https://example.com/api/auth/realms/dbrepo
+  ## @skip brokerservice.loadDefinition
   loadDefinition:
     enabled: true
     existingSecret: broker-service-secret
+  ## @skip brokerservice.extraVolumes
   extraVolumes:
     - name: secret-map
       secret:
         secretName: broker-service-secret
+  ## @param brokerservice.extraPlugins The list of plugins to be activated.
   extraPlugins: rabbitmq_prometheus rabbitmq_auth_backend_oauth2 rabbitmq_auth_mechanism_ssl
   persistence:
+    ## @param brokerservice.persistence.enabled If set to true, a PVC will be created.
     enabled: false
+  ## @skip brokerservice.service
   service:
     type: ClusterIP
     managerPortEnabled: true
     # loadBalancerIP:
-  replicaCount: 2
+  ## @param brokerservice.replicaCount The number of replicas.
+  replicaCount: 1
 
 ## @section Analyse Service
 
-## @param analyseservice.enabled Enable the Broker Service.
-## @skip analyseservice.image
-## @param analyseservice.endpoint The url of the endpoint.
-## @param analyseservice.s3.endpoint The S3-capable endpoint the microservice connects to.
-## @param analyseservice.replicaCount The number of replicas.
-##
 analyseservice:
+  ## @param analyseservice.enabled Enable the Analyse Service.
   enabled: true
   image:
-    name: s210.dl.hpc.tuwien.ac.at/dbrepo/analyse-service:1.4.4
+    ## @skip analyseservice.image.name
+    name: registry.datalab.tuwien.ac.at/dbrepo/analyse-service:1.4.4
+    ## @skip analyseservice.image.pullPolicy
     pullPolicy: Always
+    ## @param analyseservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
+  ## @param analyseservice.endpoint The url of the endpoint.
   endpoint: http://analyse-service
   s3:
+    ## @param analyseservice.s3.endpoint The S3-capable endpoint the microservice connects to.
     endpoint: http://storageservice-s3:9000
+  ## @param analyseservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Metadata Service
 
-## @param metadataservice.enabled Enable the Metadata Service.
-## @skip metadataservice.image
-## @param metadataservice.endpoint The Metadata Service endpoint.
-## @param metadataservice.admin.email The OAI-PMH exposed admin e-mail.
-## @param metadataservice.deletedRecord The OAI-PMH exposed delete policy.
-## @param metadataservice.repositoryName The OAI-PMH exposed repository name.
-## @param metadataservice.granularity The OAI-PMH exposed record granularity.
-## @param metadataservice.datacite.enabled Enable the DataCite account for minting DOIs.
-## @param metadataservice.datacite.url The DataCite api endpoint url.
-## @param metadataservice.datacite.prefix The DataCite prefix.
-## @param metadataservice.datacite.username The DataCite api username.
-## @param metadataservice.datacite.password The DataCite api user password.
-## @param metadataservice.sparql.connectionTimeout The connection timeout for sparql queries fetching remote data in ms.
-## @param metadataservice.s3.endpoint The S3-capable endpoint the microservice connects to.
-## @skip metadataservice.s3.bucket
-## @param metadataservice.s3.auth.username The S3-capable endpoint username (or access key id).
-## @param metadataservice.s3.auth.password The S3-capable endpoint user password (or access key secret).
-## @param metadataservice.replicaCount The number of replicas.
-##
 metadataservice:
+  ## @param metadataservice.enabled Enable the Metadata Service.
   enabled: true
   image:
-    name: s210.dl.hpc.tuwien.ac.at/dbrepo/metadata-service:1.4.4
+    ## @skip metadataservice.image.name
+    name: registry.datalab.tuwien.ac.at/dbrepo/metadata-service:1.4.4
+    ## @skip metadataservice.image.pullPolicy
     pullPolicy: Always
+    ## @param metadataservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
+  ## @param metadataservice.endpoint The Metadata Service endpoint.
   endpoint: http://metadata-service
   admin:
+    ## @param metadataservice.admin.email The OAI-PMH exposed e-mail for contacting the person responsible for the metadata records.
     email: noreply@example.com
+  ## @param metadataservice.deletedRecord The OAI-PMH exposed delete policy.
   deletedRecord: permanent
+  ## @param metadataservice.repositoryName The OAI-PMH exposed repository name.
   repositoryName: Database Repository
+  ## @param metadataservice.granularity The OAI-PMH exposed record granularity.
   granularity: YYYY-MM-DDThh:mm:ssZ
   datacite:
+    ## @param metadataservice.datacite.enabled If set to true, the service mints DOIs instead of local PIDs.
     enabled: false
+    ## @param metadataservice.datacite.url The DataCite api endpoint url.
     url: https://api.datacite.org
+    ## @param metadataservice.datacite.prefix The DataCite prefix.
     prefix: ""
+    ## @param metadataservice.datacite.username The DataCite api username.
     username: ""
+    ## @param metadataservice.datacite.password The DataCite api user password.
     password: ""
   sparql:
+    ## @param metadataservice.sparql.connectionTimeout The connection timeout for sparql queries fetching remote data in ms.
     connectionTimeout: 10000
   s3:
+    ## @param metadataservice.s3.endpoint The S3-capable endpoint the microservice connects to.
     endpoint: http://storageservice-s3:9000
+    ## @skip metadataservice.s3.bucket
     bucket:
       import: dbrepo-upload
       export: dbrepo-download
     auth:
+      ## @param metadataservice.s3.auth.username The S3-capable endpoint username (or access key id).
       username: seaweedfsadmin
+      ## @param metadataservice.s3.auth.password The S3-capable endpoint user password (or access key secret).
       password: seaweedfsadmin
+  ## @param metadataservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Data Service
 
-## @param dataservice.enabled Enable the Metadata Service.
-## @param dataservice.endpoint The endpoint for the microservices.
-## @skip dataservice.image
-## @param dataservice.grant.read The default database permissions for users with read access.
-## @param dataservice.grant.write The default database permissions for users with write access.
-## @param dataservice.default.date The default date format id for dates.
-## @param dataservice.default.time The default date format id for times.
-## @param dataservice.default.timestamp The default date format id for timestamps.
-## @param dataservice.s3.endpoint The S3-capable endpoint the microservice connects to.
-## @skip dataservice.s3.bucket
-## @param dataservice.s3.auth.username The S3-capable endpoint username (or access key id).
-## @param dataservice.s3.auth.password The S3-capable endpoint user password (or access key secret).
-## @param dataservice.s3.filePath The local location to download/upload files from/to S3-capable endpoint.
-## @param dataservice.consumerConcurrentMin The minimum broker service consumer number.
-## @param dataservice.consumerConcurrentMax The maximum broker service consumer number.
-## @param dataservice.requeueRejected Enable re-queueing of rejected messages to the broker service.
-## @param dataservice.replicaCount The number of replicas.
-##
 dataservice:
+  ## @param dataservice.enabled Enable the Data Service.
   enabled: true
+  ## @param dataservice.endpoint The endpoint for the microservices.
   endpoint: http://data-service
   image:
-    name: s210.dl.hpc.tuwien.ac.at/dbrepo/data-service:1.4.4
+    ## @skip dataservice.image.name
+    name: registry.datalab.tuwien.ac.at/dbrepo/data-service:1.4.4
+    ## @skip dataservice.image.pullPolicy
     pullPolicy: Always
+    ## @param dataservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
   grant:
+    ## @param dataservice.grant.read The default database permissions for users with read access.
     read: SELECT
+    ## @param dataservice.grant.write The default database permissions for users with write access.
     write: SELECT, CREATE, CREATE VIEW, CREATE ROUTINE, CREATE TEMPORARY TABLES, LOCK TABLES, INDEX, TRIGGER, INSERT, UPDATE, DELETE
   default:
+    ## @param dataservice.default.date The default date format id for dates. Default: YYYY-MM-dd (e.g. 2024-06-15).
     date: 3
+    ## @param dataservice.default.time The default date format id for times. Default: HH:mm:ss (e.g. 14:23:42).
     time: 4
+    ## @param dataservice.default.timestamp The default date format id for timestamps. Default: YYYY-MM-dd HH:mm:ss (e.g. 2024-06-15 14:23:42).
     timestamp: 1
   s3:
+    ## @param dataservice.s3.endpoint The S3-capable endpoint the microservice connects to.
     endpoint: http://storageservice-s3:9000
+    ## @skip dataservice.s3.bucket
     bucket:
       import: dbrepo-upload
       export: dbrepo-download
     auth:
+      ## @param dataservice.s3.auth.username The S3-capable endpoint username (or access key id).
       username: seaweedfsadmin
+      ## @param dataservice.s3.auth.password The S3-capable endpoint user password (or access key secret).
       password: seaweedfsadmin
+    ## @param dataservice.s3.filePath The local location to download/upload files from/to S3-capable endpoint.
     filePath: /s3
-  consumerConcurrentMin: 1
-  consumerConcurrentMax: 5
-  requeueRejected: false
+  ## @param dataservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Search Service
 
-## @param searchservice.enabled Enable the Search Service.
-## @param searchservice.endpoint The endpoint for the microservices.
-## @skip searchservice.image
-## @skip searchservice.init
-## @param searchservice.replicaCount The number of replicas.
-##
 searchservice:
+  ## @param searchservice.enabled Enable the Search Service.
   enabled: true
+  ## @param searchservice.endpoint The endpoint for the microservices.
   endpoint: http://search-service
   image:
-    name: s210.dl.hpc.tuwien.ac.at/dbrepo/search-service:1.4.4
+    ## @skip searchservice.image.name
+    name: registry.datalab.tuwien.ac.at/dbrepo/search-service:1.4.4
+    ## @skip searchservice.image.pullPolicy
     pullPolicy: Always
+    ## @param searchservice.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
+  ## @skip searchservice.init
   init:
     image:
-      name: s210.dl.hpc.tuwien.ac.at/dbrepo/search-service-init:1.4.4
+      name: registry.datalab.tuwien.ac.at/dbrepo/search-service-init:1.4.4
       pullPolicy: Always
+  ## @param searchservice.replicaCount The number of replicas.
   replicaCount: 2
 
 ## @section Storage Service
 
-## @param storageservice.enabled Enable the Storage Service.
-## @skip storageservice.master
-## @skip storageservice.filer
-## @skip storageservice.volume
-## @skip storageservice.s3
-## @skip storageservice.init
-##
 storageservice:
+  ## @param storageservice.enabled Enable the Storage Service.
   enabled: true
+  ## @skip storageservice.master
   master:
     enabled: true
+  ## @skip storageservice.filer
   filer:
     enabled: true
     replicas: 1
@@ -599,9 +501,11 @@ storageservice:
       enableAuth: true
       skipAuthSecretCreation: true
       existingConfigSecret: seaweedfs-s3-secret
+  ## @skip storageservice.volume
   volume:
     enabled: true
     replicas: 1
+  ## @skip storageservice.s3
   s3:
     enabled: true
     replicas: 2
@@ -616,55 +520,51 @@ storageservice:
     auth:
       username: seaweedfsadmin
       password: seaweedfsadmin
+  ## @skip storageservice.init
   init:
-    image: s210.dl.hpc.tuwien.ac.at/dbrepo/storage-service-init:1.4.4
+    image: registry.datalab.tuwien.ac.at/dbrepo/storage-service-init:1.4.4
     pullPolicy: Always
 
 ## @section User Interface
 
-## @param ui.enabled Enable the User Interface.
-## @skip ui.image
-## @param ui.public.api.client The endpoint for the client api.
-## @param ui.public.api.server The endpoint for the server api.
-## @param ui.public.title The user interface title.
-## @param ui.public.logo The user interface logo.
-## @param ui.public.icon The user interface icon.
-## @param ui.public.touch The user interface apple touch icon.
-## @param ui.public.broker.host The displayed broker hostname.
-## @param ui.public.broker.port.5671 Enable display of the broker 5671 port and mark it as secure (SSL/TLS).
-## @param ui.public.broker.port.5672 Enable display of the broker 5672 port and mark it as insecure (no SSL/TLS).
-## @param ui.public.broker.extra Extra metadata displayed.
-## @param ui.public.database.extra Extra metadata displayed.
-## @skip ui.public.links
-## @param ui.public.pid.default.publisher The default dataset publisher for persisted identifiers.
-## @param ui.public.doi.enabled Enable the display that DOIs are minted.
-## @param ui.public.doi.endpoint The DOI proxy.
-## @param ui.replicaCount The number of replicas.
-## @skip ui.extraVolumes
-## @skip ui.extraVolumeMounts
-##
 ui:
+  ## @param ui.enabled Enable the User Interface.
   enabled: true
   image:
-    name: s210.dl.hpc.tuwien.ac.at/dbrepo/ui:1.4.4
+    ## @skip ui.image.name
+    name: registry.datalab.tuwien.ac.at/dbrepo/ui:1.4.4
+    ## @skip ui.image.pullPolicy
     pullPolicy: Always
+    ## @param ui.image.debug Set the logging level to `trace`. Otherwise, set to `info`.
     debug: false
   public:
     api:
+      ## @param ui.public.api.client The endpoint for the client api.
       client: ""
+      ## @param ui.public.api.server The endpoint for the server api.
       server: ""
+    ## @param ui.public.title The user interface title.
     title: "Database Repository"
+    ## @param ui.public.logo The user interface logo.
     logo: "/logo.svg"
+    ## @param ui.public.icon The user interface icon.
     icon: "/favicon.ico"
+    ## @param ui.public.touch The user interface apple touch icon.
     touch: "/apple-touch-icon.png"
     broker:
+      ## @param ui.public.broker.host The displayed broker hostname.
       host: example.com
       port:
+        ## @param ui.public.broker.port.5671 Enable display of the broker 5671 port and mark it as secure (SSL/TLS).
         5671: true
+        ## @param ui.public.broker.port.5672 Enable display of the broker 5672 port and mark it as insecure (no SSL/TLS).
         5672: false
+      ## @param ui.public.broker.extra Extra metadata displayed.
       extra: ""
     database:
+      ## @param ui.public.database.extra Extra metadata displayed.
       extra: "128.130.0.0/15"
+    ## @skip ui.public.links
     links:
       rabbitmq:
         text: RabbitMQ Admin
@@ -674,15 +574,21 @@ ui:
         href: /api/auth/
     pid:
       default:
+        ## @param ui.public.pid.default.publisher The default dataset publisher for persisted identifiers.
         publisher: "Example University"
     doi:
+      ## @param ui.public.doi.enabled Enable the display that DOIs are minted.
       enabled: false
+      ## @param ui.public.doi.endpoint The DOI proxy.
       endpoint: https://doi.org
+  ## @param ui.replicaCount The number of replicas.
   replicaCount: 2
+  ## @skip ui.extraVolumes
   extraVolumes: [ ]
   #  - name: images-map
   #    configMap:
   #      name: ui-config
+  ## @skip ui.extraVolumeMounts
   extraVolumeMounts: [ ]
   #  - name: images-map
   #    mountPath: /static/logo.svg
@@ -690,32 +596,35 @@ ui:
 
 ## @section Ingress
 
-## @param ingress.enabled Enable the ingress.
-## @skip ingress.className
-## @skip ingress.tls
-## @skip ingress.annotations
-##
 ingress:
+  ## @param ingress.enabled Enable the ingress.
   enabled: false
+  ## @param ingress.className The ingress class name.
   className: nginx
   tls:
+    ## @param ingress.tls.enabled Enable TLS/SSL for the ingress.
     enabled: true
+    ## @param ingress.tls.secretName The secret holding the SSL/TLS certificate. Needs to have keys `tls.crt` and `tls.key` and optionally `ca.crt`.
     secretName: ingress-cert
   annotations:
+    ## @skip ingress.annotations.basic The ingress rules for proxying requests directly to services.
     basic: { }
     #      nginx.org/path-regex: "case_sensitive"
     #      nginx.ingress.kubernetes.io/use-regex: "true"
     #      cert-manager.io/cluster-issuer: letsencrypt-cluster-issuer
+    ## @skip ingress.annotations.rewriteApi The ingress rules for rewriting certain paths to /api/.
     rewriteApi:
       #      nginx.org/path-regex: "case_sensitive"
       #      cert-manager.io/cluster-issuer: letsencrypt-cluster-issuer
       nginx.ingress.kubernetes.io/use-regex: "true"
       nginx.ingress.kubernetes.io/rewrite-target: /api/$1
+    ## @skip ingress.annotations.rewriteRoot The ingress rules for rewriting certain paths to /.
     rewriteRoot:
       #      nginx.org/path-regex: "case_sensitive"
       #      cert-manager.io/cluster-issuer: letsencrypt-cluster-issuer
       nginx.ingress.kubernetes.io/use-regex: "true"
       nginx.ingress.kubernetes.io/rewrite-target: /$1
+    ## @skip ingress.annotations.rewriteRootSecure The ingress rules for rewriting certain paths to / and force SSL/TLS encrypted traffic.
     rewriteRootSecure:
       #      nginx.org/path-regex: "case_sensitive"
       #      cert-manager.io/cluster-issuer: letsencrypt-cluster-issuer
@@ -723,6 +632,7 @@ ingress:
       nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
       nginx.ingress.kubernetes.io/use-regex: "true"
       nginx.ingress.kubernetes.io/rewrite-target: /$1
+    ## @skip ingress.annotations.rewritePid The ingress rules for rewriting certain paths to /api/identifier/.
     rewritePid:
       #      nginx.org/path-regex: "case_sensitive"
       #      cert-manager.io/cluster-issuer: letsencrypt-cluster-issuer
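
For existing installations, the Galera-era values no longer exist; the renames visible above map roughly as follows, shown as a hedged upgrade sketch (release name assumed):

```bash
# Old key (Galera layout)          -> new key (primary/secondary layout)
#   metadatadb.rootUser.password   -> metadatadb.auth.rootPassword
#   metadatadb.db.name             -> metadatadb.auth.database
#   datadb.rootUser.password       -> datadb.auth.rootPassword
#   datadb.replicaCount            -> datadb.secondary.replicaCount
helm upgrade dbrepo ./helm/dbrepo \
  --set metadatadb.auth.rootPassword=dbrepo \
  --set datadb.auth.rootPassword=dbrepo \
  --set datadb.secondary.replicaCount=2
```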
diff --git a/install.sh b/install.sh
index 9850ccd35eaae4c71032655bbe72e97892aba0f1..a11a7c3b2380bca183e2a7987df405eef8212259 100644
--- a/install.sh
+++ b/install.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
 # preset
-VERSION="latest"
+VERSION="1.4.4"
 MIN_CPU=8
 MIN_RAM=8
 MIN_MAP_COUNT=262144
@@ -59,7 +59,8 @@ fi
 echo "[🚀] Gathering environment ..."
 mkdir -p ./dist
 curl -sSL -o ./docker-compose.yml "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/.docker/docker-compose.yml"
-curl -sSL -o ./dist/2_setup-data.sql "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-metadata-db/2_setup-data.sql"
+curl -sSL -o ./dist/1_setup-schema.sql "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-metadata-db/setup-schema.sql"
+curl -sSL -o ./dist/2_setup-data.sql "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-metadata-db/setup-data.sql"
 curl -sSL -o ./dist/rabbitmq.conf "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-broker-service/rabbitmq.conf"
 curl -sSL -o ./dist/enabled_plugins "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-broker-service/enabled_plugins"
 curl -sSL -o ./dist/cert.pem "https://gitlab.phaidra.org/fair-data-austria-db-repository/fda-services/-/raw/release-${VERSION}/dbrepo-broker-service/cert.pem"
diff --git a/lib/python/dbrepo/RestClient.py b/lib/python/dbrepo/RestClient.py
index 6813acd095291306b53e75fbf08eaac597ce78bf..ae956dd07277830235c6d3ca991190766f8fd7de 100644
--- a/lib/python/dbrepo/RestClient.py
+++ b/lib/python/dbrepo/RestClient.py
@@ -1543,8 +1543,7 @@ class RestClient:
         raise ResponseCodeError(f'Failed to delete database access: response code: {response.status_code} is not '
                                 f'201 (CREATED): {response.text}')
 
-    def create_subset(self, database_id: int, query: str, page: int = 0, size: int = 10,
-                      timestamp: datetime.datetime = datetime.datetime.now()) -> Result:
+    def create_subset(self, database_id: int, query: str, page: int = 0, size: int = 10) -> Result:
         """
         Executes a SQL query in a database where the current user has at least read access with given database id. The
         result set can be paginated with setting page and size (both). Historic data can be queried by setting
@@ -1554,7 +1553,6 @@ class RestClient:
         :param query: The query statement.
         :param page: The result pagination number. Optional. Default: 0.
         :param size: The result pagination size. Optional. Default: 10.
-        :param timestamp: The query execution time. Optional. Default: now.
 
         :returns: The result set, if successful.
 
@@ -1569,8 +1567,8 @@ class RestClient:
         url = f'/api/database/{database_id}/subset'
         if page is not None and size is not None:
             url += f'?page={page}&size={size}'
-        response = self._wrapper(method="post", url=url, force_auth=True,
-                                 payload=ExecuteQuery(statement=query, timestamp=timestamp))
+        response = self._wrapper(method="post", url=url, force_auth=True, headers={"Accept": "application/json"},
+                                 payload=ExecuteQuery(statement=query))
         if response.status_code == 201:
             body = response.json()
             return Result.model_validate(body)
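
The raw HTTP call after this change, for reference: pagination stays in the query string, the payload carries only the statement, and `Accept: application/json` is now sent explicitly. The base URL and bearer token are placeholders:

```bash
# POST a subset query against database id 1; expects 201 (CREATED) on success.
curl -sSL -X POST "http://localhost/api/database/1/subset?page=0&size=10" \
  -H "Authorization: Bearer ${TOKEN}" \
  -H "Content-Type: application/json" \
  -H "Accept: application/json" \
  -d '{"statement": "SELECT id FROM mytable"}'
```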
diff --git a/lib/python/dbrepo/api/dto.py b/lib/python/dbrepo/api/dto.py
index 4656a5220d919e14bb6cbf37b1ff7b016378ad94..20d7a8252354bd6b6523eb2026feeff27ed71967 100644
--- a/lib/python/dbrepo/api/dto.py
+++ b/lib/python/dbrepo/api/dto.py
@@ -720,7 +720,6 @@ class Unit(BaseModel):
 
 class ExecuteQuery(BaseModel):
     statement: str
-    timestamp: Timestamp
 
 
 class TitleType(str, Enum):
diff --git a/lib/python/docs/index.rst b/lib/python/docs/index.rst
index f905221999ac672b663611b248d6c75266c82e26..08ee924d833dc2543b932861cd110c27d57c85af 100644
--- a/lib/python/docs/index.rst
+++ b/lib/python/docs/index.rst
@@ -12,7 +12,7 @@ Quickstart
 ----------
 
 Find numerous quickstart examples on
-the `DBRepo website <https://www.ifs.tuwien.ac.at/infrastructures/dbrepo//usage-overview/>`_.
+the `DBRepo website <https://www.ifs.tuwien.ac.at/infrastructures/dbrepo/1.4.4/api/>`_.
 
 AMQP API Client
 -----------
diff --git a/mkdocs.yml b/mkdocs.yml
index ada8171b5076dc755d32c4fee00ca586ef997826..e2627e6b0d2368bd47dbb1511f8b7fc8a72a68d4 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -10,8 +10,8 @@ nav:
     - Welcome to DBRepo: index.md
     - Why use DBRepo: why.md
     - Help with DBRepo: help.md
-    - Installation: deployment-docker-compose.md
-    - Kubernetes: deployment-helm.md
+    - Installation: installation.md
+    - Kubernetes: kubernetes.md
     - Migration Guide: migration.md
     - contributing.md
   - Concepts:
diff --git a/values.schema.json b/values.schema.json
index 2cc52abfed3216d26a5e140292c82ffc2e023a8f..0e02517ab5ff9516cda74c7459dc8b587c6fa805 100644
--- a/values.schema.json
+++ b/values.schema.json
@@ -187,6 +187,9 @@
                         "password": {
                             "type": "string"
                         },
+                        "passwordHash": {
+                            "type": "string"
+                        },
                         "tls": {
                             "properties": {
                                 "enabled": {
@@ -314,60 +317,29 @@
         },
         "datadb": {
             "properties": {
+                "auth": {
+                    "properties": {
+                        "replicationPassword": {
+                            "type": "string"
+                        },
+                        "replicationUser": {
+                            "type": "string"
+                        },
+                        "rootPassword": {
+                            "type": "string"
+                        }
+                    },
+                    "type": "object"
+                },
                 "enabled": {
                     "type": "boolean"
                 },
                 "extraFlags": {
                     "type": "string"
                 },
-                "extraVolumeMounts": {
-                    "items": {
-                        "properties": {
-                            "mountPath": {
-                                "type": "string"
-                            },
-                            "name": {
-                                "type": "string"
-                            }
-                        },
-                        "type": "object"
-                    },
-                    "type": "array"
-                },
-                "extraVolumes": {
-                    "items": {
-                        "properties": {
-                            "emptyDir": {
-                                "properties": {},
-                                "type": "object"
-                            },
-                            "name": {
-                                "type": "string"
-                            }
-                        },
-                        "type": "object"
-                    },
-                    "type": "array"
-                },
                 "fullnameOverride": {
                     "type": "string"
                 },
-                "galera": {
-                    "properties": {
-                        "mariabackup": {
-                            "properties": {
-                                "password": {
-                                    "type": "string"
-                                },
-                                "user": {
-                                    "type": "string"
-                                }
-                            },
-                            "type": "object"
-                        }
-                    },
-                    "type": "object"
-                },
                 "image": {
                     "properties": {
                         "debug": {
@@ -384,208 +356,230 @@
                     },
                     "type": "object"
                 },
-                "persistence": {
+                "primary": {
                     "properties": {
-                        "enabled": {
-                            "type": "boolean"
-                        }
-                    },
-                    "type": "object"
-                },
-                "replicaCount": {
-                    "type": "integer"
-                },
-                "rootUser": {
-                    "properties": {
-                        "password": {
-                            "type": "string"
-                        },
-                        "user": {
-                            "type": "string"
-                        }
-                    },
-                    "type": "object"
-                },
-                "service": {
-                    "properties": {
-                        "extraPorts": {
+                        "extraVolumeMounts": {
                             "items": {
                                 "properties": {
-                                    "name": {
+                                    "mountPath": {
                                         "type": "string"
                                     },
-                                    "port": {
-                                        "type": "integer"
-                                    },
-                                    "protocol": {
+                                    "name": {
                                         "type": "string"
-                                    },
-                                    "targetPort": {
-                                        "type": "integer"
                                     }
                                 },
                                 "type": "object"
                             },
                             "type": "array"
-                        }
-                    },
-                    "type": "object"
-                },
-                "sidecars": {
-                    "items": {
-                        "properties": {
-                            "envFrom": {
-                                "items": {
-                                    "properties": {
-                                        "secretRef": {
-                                            "properties": {
-                                                "name": {
-                                                    "type": "string"
-                                                }
-                                            },
-                                            "type": "object"
-                                        }
-                                    },
-                                    "type": "object"
-                                },
-                                "type": "array"
-                            },
-                            "image": {
-                                "type": "string"
-                            },
-                            "imagePullPolicy": {
-                                "type": "string"
-                            },
-                            "livenessProbe": {
+                        },
+                        "extraVolumes": {
+                            "items": {
                                 "properties": {
-                                    "exec": {
-                                        "properties": {
-                                            "command": {
-                                                "items": {
-                                                    "type": "string"
-                                                },
-                                                "type": "array"
-                                            }
-                                        },
+                                    "emptyDir": {
+                                        "properties": {},
                                         "type": "object"
                                     },
-                                    "initialDelaySeconds": {
-                                        "type": "integer"
-                                    },
-                                    "periodSeconds": {
-                                        "type": "integer"
+                                    "name": {
+                                        "type": "string"
                                     }
                                 },
                                 "type": "object"
                             },
-                            "name": {
-                                "type": "string"
+                            "type": "array"
+                        },
+                        "persistence": {
+                            "properties": {
+                                "enabled": {
+                                    "type": "boolean"
+                                }
                             },
-                            "ports": {
-                                "items": {
-                                    "properties": {
-                                        "containerPort": {
-                                            "type": "integer"
-                                        },
-                                        "name": {
-                                            "type": "string"
+                            "type": "object"
+                        },
+                        "service": {
+                            "properties": {
+                                "extraPorts": {
+                                    "items": {
+                                        "properties": {
+                                            "name": {
+                                                "type": "string"
+                                            },
+                                            "port": {
+                                                "type": "integer"
+                                            },
+                                            "protocol": {
+                                                "type": "string"
+                                            },
+                                            "targetPort": {
+                                                "type": "integer"
+                                            }
                                         },
-                                        "protocol": {
-                                            "type": "string"
-                                        }
+                                        "type": "object"
                                     },
-                                    "type": "object"
-                                },
-                                "type": "array"
+                                    "type": "array"
+                                }
                             },
-                            "readinessProbe": {
+                            "type": "object"
+                        },
+                        "sidecars": {
+                            "items": {
                                 "properties": {
-                                    "exec": {
+                                    "envFrom": {
+                                        "items": {
+                                            "properties": {
+                                                "secretRef": {
+                                                    "properties": {
+                                                        "name": {
+                                                            "type": "string"
+                                                        }
+                                                    },
+                                                    "type": "object"
+                                                }
+                                            },
+                                            "type": "object"
+                                        },
+                                        "type": "array"
+                                    },
+                                    "image": {
+                                        "type": "string"
+                                    },
+                                    "imagePullPolicy": {
+                                        "type": "string"
+                                    },
+                                    "livenessProbe": {
                                         "properties": {
-                                            "command": {
-                                                "items": {
-                                                    "type": "string"
+                                            "exec": {
+                                                "properties": {
+                                                    "command": {
+                                                        "items": {
+                                                            "type": "string"
+                                                        },
+                                                        "type": "array"
+                                                    }
                                                 },
-                                                "type": "array"
+                                                "type": "object"
+                                            },
+                                            "initialDelaySeconds": {
+                                                "type": "integer"
+                                            },
+                                            "periodSeconds": {
+                                                "type": "integer"
                                             }
                                         },
                                         "type": "object"
                                     },
-                                    "initialDelaySeconds": {
-                                        "type": "integer"
+                                    "name": {
+                                        "type": "string"
                                     },
-                                    "periodSeconds": {
-                                        "type": "integer"
-                                    }
-                                },
-                                "type": "object"
-                            },
-                            "securityContext": {
-                                "properties": {
-                                    "allowPrivilegeEscalation": {
-                                        "type": "boolean"
+                                    "ports": {
+                                        "items": {
+                                            "properties": {
+                                                "containerPort": {
+                                                    "type": "integer"
+                                                },
+                                                "name": {
+                                                    "type": "string"
+                                                },
+                                                "protocol": {
+                                                    "type": "string"
+                                                }
+                                            },
+                                            "type": "object"
+                                        },
+                                        "type": "array"
                                     },
-                                    "capabilities": {
+                                    "readinessProbe": {
                                         "properties": {
-                                            "drop": {
-                                                "items": {
-                                                    "type": "string"
+                                            "exec": {
+                                                "properties": {
+                                                    "command": {
+                                                        "items": {
+                                                            "type": "string"
+                                                        },
+                                                        "type": "array"
+                                                    }
                                                 },
-                                                "type": "array"
+                                                "type": "object"
+                                            },
+                                            "initialDelaySeconds": {
+                                                "type": "integer"
+                                            },
+                                            "periodSeconds": {
+                                                "type": "integer"
                                             }
                                         },
                                         "type": "object"
                                     },
-                                    "runAsGroup": {
-                                        "type": "integer"
-                                    },
-                                    "runAsNonRoot": {
-                                        "type": "boolean"
-                                    },
-                                    "runAsUser": {
-                                        "type": "integer"
-                                    },
-                                    "seccompProfile": {
+                                    "securityContext": {
                                         "properties": {
-                                            "type": {
-                                                "type": "string"
+                                            "allowPrivilegeEscalation": {
+                                                "type": "boolean"
+                                            },
+                                            "capabilities": {
+                                                "properties": {
+                                                    "drop": {
+                                                        "items": {
+                                                            "type": "string"
+                                                        },
+                                                        "type": "array"
+                                                    }
+                                                },
+                                                "type": "object"
+                                            },
+                                            "runAsGroup": {
+                                                "type": "integer"
+                                            },
+                                            "runAsNonRoot": {
+                                                "type": "boolean"
+                                            },
+                                            "runAsUser": {
+                                                "type": "integer"
+                                            },
+                                            "seccompProfile": {
+                                                "properties": {
+                                                    "type": {
+                                                        "type": "string"
+                                                    }
+                                                },
+                                                "type": "object"
                                             }
                                         },
                                         "type": "object"
+                                    },
+                                    "volumeMounts": {
+                                        "items": {
+                                            "properties": {
+                                                "mountPath": {
+                                                    "type": "string"
+                                                },
+                                                "name": {
+                                                    "type": "string"
+                                                }
+                                            },
+                                            "type": "object"
+                                        },
+                                        "type": "array"
                                     }
                                 },
                                 "type": "object"
                             },
-                            "volumeMounts": {
-                                "items": {
-                                    "properties": {
-                                        "mountPath": {
-                                            "type": "string"
-                                        },
-                                        "name": {
-                                            "type": "string"
-                                        }
-                                    },
-                                    "type": "object"
-                                },
-                                "type": "array"
-                            }
-                        },
-                        "type": "object"
+                            "type": "array"
+                        }
                     },
-                    "type": "array"
+                    "type": "object"
+                },
+                "secondary": {
+                    "properties": {
+                        "replicaCount": {
+                            "type": "integer"
+                        }
+                    },
+                    "type": "object"
                 }
             },
             "type": "object"
         },
         "dataservice": {
             "properties": {
-                "consumerConcurrentMax": {
-                    "type": "integer"
-                },
-                "consumerConcurrentMin": {
-                    "type": "integer"
-                },
                 "default": {
                     "properties": {
                         "date": {
@@ -634,9 +628,6 @@
                 "replicaCount": {
                     "type": "integer"
                 },
-                "requeueRejected": {
-                    "type": "boolean"
-                },
                 "s3": {
                     "properties": {
                         "auth": {
@@ -762,9 +753,21 @@
         },
         "metadatadb": {
             "properties": {
-                "db": {
+                "auth": {
                     "properties": {
-                        "name": {
+                        "database": {
+                            "type": "string"
+                        },
+                        "replicationPassword": {
+                            "type": "string"
+                        },
+                        "replicationUser": {
+                            "type": "string"
+                        },
+                        "root": {
+                            "type": "string"
+                        },
+                        "rootPassword": {
                             "type": "string"
                         }
                     },
@@ -780,33 +783,9 @@
                 "fullnameOverride": {
                     "type": "string"
                 },
-                "galera": {
-                    "properties": {
-                        "mariabackup": {
-                            "properties": {
-                                "password": {
-                                    "type": "string"
-                                },
-                                "user": {
-                                    "type": "string"
-                                }
-                            },
-                            "type": "object"
-                        }
-                    },
-                    "type": "object"
-                },
                 "host": {
                     "type": "string"
                 },
-                "image": {
-                    "properties": {
-                        "debug": {
-                            "type": "boolean"
-                        }
-                    },
-                    "type": "object"
-                },
                 "initdbScriptsConfigMap": {
                     "type": "string"
                 },
@@ -821,42 +800,10 @@
                     },
                     "type": "object"
                 },
-                "persistence": {
+                "secondary": {
                     "properties": {
-                        "enabled": {
-                            "type": "boolean"
-                        }
-                    },
-                    "type": "object"
-                },
-                "replicaCount": {
-                    "type": "integer"
-                },
-                "rootUser": {
-                    "properties": {
-                        "password": {
-                            "type": "string"
-                        },
-                        "user": {
-                            "type": "string"
-                        }
-                    },
-                    "type": "object"
-                },
-                "service": {
-                    "properties": {
-                        "annotations": {
-                            "properties": {},
-                            "type": "object"
-                        },
-                        "loadBalancerIP": {
-                            "type": "string"
-                        },
-                        "loadBalancerSourceRanges": {
-                            "type": "array"
-                        },
-                        "type": {
-                            "type": "string"
+                        "replicaCount": {
+                            "type": "integer"
                         }
                     },
                     "type": "object"
@@ -975,66 +922,8 @@
                 "clusterName": {
                     "type": "string"
                 },
-                "config": {
-                    "properties": {
-                        "opensearch.yml": {
-                            "type": "string"
-                        }
-                    },
-                    "type": "object"
-                },
                 "enabled": {
-                    "type": "boolean"
-                },
-                "extraEnvs": {
-                    "items": {
-                        "properties": {
-                            "name": {
-                                "type": "string"
-                            },
-                            "value": {
-                                "type": "string"
-                            }
-                        },
-                        "type": "object"
-                    },
-                    "type": "array"
-                },
-                "extraVolumeMounts": {
-                    "items": {
-                        "properties": {
-                            "mountPath": {
-                                "type": "string"
-                            },
-                            "name": {
-                                "type": "string"
-                            },
-                            "readOnly": {
-                                "type": "boolean"
-                            }
-                        },
-                        "type": "object"
-                    },
-                    "type": "array"
-                },
-                "extraVolumes": {
-                    "items": {
-                        "properties": {
-                            "name": {
-                                "type": "string"
-                            },
-                            "secret": {
-                                "properties": {
-                                    "secretName": {
-                                        "type": "string"
-                                    }
-                                },
-                                "type": "object"
-                            }
-                        },
-                        "type": "object"
-                    },
-                    "type": "array"
+                    "type": "string"
                 },
                 "fullnameOverride": {
                     "type": "string"
@@ -1042,53 +931,21 @@
                 "host": {
                     "type": "string"
                 },
-                "masterService": {
-                    "type": "string"
-                },
-                "password": {
-                    "type": "string"
-                },
-                "persistence": {
-                    "properties": {
-                        "enabled": {
-                            "type": "boolean"
-                        }
-                    },
-                    "type": "object"
-                },
                 "port": {
                     "type": "integer"
                 },
-                "protocol": {
-                    "type": "string"
-                },
-                "replicas": {
-                    "type": "integer"
-                },
-                "service": {
+                "security": {
                     "properties": {
-                        "annotations": {
-                            "properties": {},
-                            "type": "object"
-                        },
-                        "loadBalancerSourceRanges": {
-                            "type": "array"
-                        },
-                        "type": {
+                        "adminPassword": {
                             "type": "string"
-                        }
-                    },
-                    "type": "object"
-                },
-                "sysctlInit": {
-                    "properties": {
+                        },
                         "enabled": {
                             "type": "boolean"
                         }
                     },
                     "type": "object"
                 },
-                "username": {
+                "servicenameOverride": {
                     "type": "string"
                 }
             },
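
The search-db block drops the embedded OpenSearch chart options (config, extraEnvs, extraVolumeMounts, extraVolumes, masterService, persistence, replicas, service, sysctlInit) in favor of a flat connection-style block with a security sub-object. A hypothetical fragment for the new shape (host and password are placeholders):

    searchdb:
      enabled: true                  # whether to deploy the search database
      host: search-db                # placeholder hostname
      port: 9200
      security:
        enabled: true
        adminPassword: changeme      # placeholder, not a chart default
      servicenameOverride: search-db
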
@@ -1450,6 +1307,42 @@
                 },
                 "replicaCount": {
                     "type": "integer"
+                },
+                "securityContext": {
+                    "properties": {
+                        "allowPrivilegeEscalation": {
+                            "type": "boolean"
+                        },
+                        "capabilities": {
+                            "properties": {
+                                "drop": {
+                                    "items": {
+                                        "type": "string"
+                                    },
+                                    "type": "array"
+                                }
+                            },
+                            "type": "object"
+                        },
+                        "runAsGroup": {
+                            "type": "integer"
+                        },
+                        "runAsNonRoot": {
+                            "type": "boolean"
+                        },
+                        "runAsUser": {
+                            "type": "integer"
+                        },
+                        "seccompProfile": {
+                            "properties": {
+                                "type": {
+                                    "type": "string"
+                                }
+                            },
+                            "type": "object"
+                        }
+                    },
+                    "type": "object"
                 }
             },
             "type": "object"