
Commit

Merge branch 'master' into feature/#263-Hide-non-active-endpoints-v2-from-swagger
TebaleloS authored Sep 30, 2024
2 parents 7b8551c + 2c1a107 commit e281c73
Showing 77 changed files with 152 additions and 145 deletions.
3 changes: 3 additions & 0 deletions .sbtrc
@@ -27,3 +27,6 @@ alias testIT=; testOnly *IntegrationTests

# Project specific aliases
alias testDB=; project database; testOnly *

# Run all tests
alias testAll=; testOnly *
12 changes: 9 additions & 3 deletions README.md
@@ -181,22 +181,28 @@ See the commands configured in the `.sbtrc` [(link)](https://www.scala-sbt.org/1

### Run Unit Tests
Use the `test` command to execute all unit tests, skipping all other types of tests.
```
```sbt
sbt test
```

### Run Integration Tests
Use the `testIT` command to execute all Integration tests, skipping all other test types.
```
```sbt
sbt testIT
```

Use the `testDB` command to execute all Integration tests in the `database` module, skipping all other tests and modules.
- Hint: project custom command
```
```sbt
sbt testDB
```

If you want to run all tests, use the following command.
- Hint: project custom command
```sbt
sbt testAll
```


## How to Release

1 change: 1 addition & 0 deletions build.sbt
@@ -41,6 +41,7 @@ flywayUser := FlywayConfiguration.flywayUser
flywayPassword := FlywayConfiguration.flywayPassword
flywayLocations := FlywayConfiguration.flywayLocations
flywaySqlMigrationSuffixes := FlywayConfiguration.flywaySqlMigrationSuffixes
flywayBaselineVersion := FlywayConfiguration.flywayBaselineVersion
libraryDependencies ++= flywayDependencies

/**
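The new `flywayBaselineVersion` setting tells the Flyway sbt plugin which version to treat as the baseline when migrating an existing schema; the concrete value lives in the project's `FlywayConfiguration` object, which is not shown in this diff. To see what Flyway actually recorded after a baseline or migration run, the history table can be inspected directly — a sketch assuming Flyway's default history table name:

```sql
-- Sketch: inspect Flyway's migration history, including any BASELINE row
-- (assumes the default history table name, flyway_schema_history).
SELECT installed_rank, version, description, type, success
FROM flyway_schema_history
ORDER BY installed_rank;
```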
@@ -0,0 +1,25 @@
/*
* Copyright 2021 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

DROP FUNCTION IF EXISTS validation.is_partitioning_valid(JSONB, BOOL);
DROP FUNCTION IF EXISTS validation.validate_partitioning(JSONB, BOOL);

DROP FUNCTION IF EXISTS runs._get_id_partitioning(jsonb, boolean);
DROP FUNCTION IF EXISTS runs._write_measurement(uuid, bigint, text, text[], jsonb, text);
DROP FUNCTION IF EXISTS runs.create_partitioning_if_not_exists(jsonb, text, jsonb);
DROP FUNCTION IF EXISTS runs.write_checkpoint(jsonb, uuid, text, timestamp with time zone, timestamp with time zone, jsonb[], boolean, text);

DROP FUNCTION IF EXISTS flows._add_to_parent_flows(bigint, bigint, text);
DROP FUNCTION IF EXISTS flows._create_flow(bigint, text);
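A quick sanity check for this migration (not part of the script itself): query the catalog for the functions it removes. Immediately after the drops, and before the re-created versions later in this commit are applied, the query below should return no rows; the schema and function names are taken from the statements above.

```sql
-- Sketch: list any of the dropped functions that still exist in the database.
SELECT n.nspname AS schema_name, p.proname AS function_name
FROM pg_proc p
JOIN pg_namespace n ON n.oid = p.pronamespace
WHERE n.nspname IN ('validation', 'runs', 'flows')
  AND p.proname IN (
      'is_partitioning_valid', 'validate_partitioning',
      '_get_id_partitioning', '_write_measurement',
      'create_partitioning_if_not_exists', 'write_checkpoint',
      '_add_to_parent_flows', '_create_flow'
  );
```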
@@ -40,6 +40,7 @@ BEGIN
RETURN nextval('global_id_seq');
END;
$$
LANGUAGE plpgsql VOLATILE SECURITY DEFINER;

GRANT EXECUTE ON FUNCTION public.global_id() TO PUBLIC;
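For illustration, calling the function just draws the next value from `global_id_seq`, so repeated calls hand out fresh identifiers:

```sql
-- Sketch: each call returns the next value of global_id_seq.
SELECT public.global_id() AS first_id;
SELECT public.global_id() AS second_id;  -- a different, newly issued identifier
```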

39 changes: 0 additions & 39 deletions database/src/main/postgres/public/jsonb_array_to_text_array.sql

This file was deleted.

@@ -26,7 +26,7 @@ CREATE OR REPLACE FUNCTION runs.write_checkpoint(
OUT status_text TEXT
) RETURNS record AS
$$
-------------------------------------------------------------------------------
--
-- Function: runs.write_checkpoint(8)
-- Creates a checkpoint and adds all the measurements that it consists of
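The hunk above only reflows the comment header, but for orientation, a hypothetical call is sketched below. The argument order and types are assumed to match the signature dropped by the migration earlier in this commit — (jsonb, uuid, text, timestamptz, timestamptz, jsonb[], boolean, text) — and every value, including the shape of the measurement JSON, is illustrative only.

```sql
-- Sketch (hypothetical values): write a checkpoint with a single measurement.
SELECT *
FROM runs.write_checkpoint(
    '{"version": 1, "keys": ["key1"], "keysToValuesMap": {"key1": "valueA"}}'::JSONB,  -- partitioning
    '9b3f4b9a-2a1e-4d6f-8c3a-0d3a4f5e6a7b'::UUID,                                      -- checkpoint id (placeholder)
    'daily_load',                                                                      -- checkpoint name (placeholder)
    now() - interval '5 minutes',                                                      -- process start time
    now(),                                                                             -- process end time
    ARRAY['{"measure": "count", "result": "42"}'::JSONB],                              -- measurements (shape is illustrative)
    true,                                                                              -- flag argument (assumed meaning)
    'job_user'                                                                         -- author (placeholder)
);
```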
@@ -30,7 +30,7 @@ CREATE OR REPLACE FUNCTION runs.get_partitioning_checkpoint_v2(
)
RETURNS SETOF record AS
$$
-------------------------------------------------------------------------------
--
-- Function: runs.get_partitioning_checkpoint_v2(BIGINT, UUID)
-- Retrieves a single checkpoint (measures and their measurement details) related to a
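A minimal usage sketch based on the signature named in the header comment, `runs.get_partitioning_checkpoint_v2(BIGINT, UUID)`; both identifiers are placeholders.

```sql
-- Sketch: fetch one checkpoint of a partitioning by its UUID (placeholder values).
SELECT *
FROM runs.get_partitioning_checkpoint_v2(
    42,                                             -- partitioning id (placeholder)
    '9b3f4b9a-2a1e-4d6f-8c3a-0d3a4f5e6a7b'::UUID    -- checkpoint id (placeholder)
);
```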
@@ -23,7 +23,7 @@ CREATE OR REPLACE FUNCTION runs.get_partitioning_additional_data(
OUT ad_author TEXT
) RETURNS SETOF record AS
$$
-------------------------------------------------------------------------------
--
-- Function: runs.get_partitioning_additional_data(1)
-- Returns additional data for the given partitioning
@@ -24,7 +24,7 @@ CREATE OR REPLACE FUNCTION runs.get_partitioning_main_flow(
OUT from_pattern BOOLEAN
) RETURNS record AS
$$
-------------------------------------------------------------------------------
--
-- Function: runs.get_partitioning_main_flow(1)
-- Returns the main flow for the given partitioning
@@ -15,8 +15,9 @@
*/

CREATE OR REPLACE FUNCTION validation.validate_partitioning(
IN i_partitioning JSONB,
IN i_strict_check BOOLEAN = true
IN i_partitioning JSONB,
IN i_strict_check BOOLEAN = true,
OUT error_message TEXT
) RETURNS SETOF TEXT AS
$$
-------------------------------------------------------------------------------
@@ -26,15 +27,15 @@ $$
-- The validation performs:
-- 1) Correct structure of the input JSONB object
-- 2) The list of keys in 'keys' is unique and doesn't have NULLs
-- 3) (if i_strict_check = true) The keys in 'keys' and in 'keysToValues' correspond to each other
-- 4) (if i_strict_check = true) The values in 'keysToValues' are non-null
-- 3) (if i_strict_check = true) The keys in 'keys' and in 'keysToValuesMap' correspond to each other
-- 4) (if i_strict_check = true) The values in 'keysToValuesMap' are non-null
--
-- Parameters:
-- i_partitioning - partitioning to validate, a valid example:
-- {
-- "keys": ["one", "two", "three"],
-- "version": 1,
-- "keysToValues": {
-- "keysToValuesMap": {
-- "one": "DatasetA",
-- "two": "Version1",
-- "three": "2022-12-20"
@@ -49,7 +50,7 @@
--
-------------------------------------------------------------------------------
DECLARE
_mandatory_fields_in_input CONSTANT TEXT[] := ARRAY['keys', 'version', 'keysToValues'];
_mandatory_fields_in_input CONSTANT TEXT[] := ARRAY['keys', 'version', 'keysToValuesMap'];
_all_fields_in_input TEXT[];

_is_input_properly_structured BOOL;
@@ -70,22 +71,24 @@ BEGIN
IF NOT _is_input_properly_structured THEN
SELECT array_agg(X.keys)
FROM (
SELECT jsonb_object_keys(i_partitioning) AS keys
) AS X
INTO _all_fields_in_input;

RETURN NEXT
'The input partitioning is not properly structured, it should have this structure: '
|| _mandatory_fields_in_input::TEXT
|| ' but contains: '
|| _all_fields_in_input::TEXT;
error_message :=
'The input partitioning is not properly structured, it should have this structure: '
|| _mandatory_fields_in_input::TEXT
|| ' but contains: '
|| _all_fields_in_input::TEXT;
RETURN NEXT;
END IF;

SELECT CAST(i_partitioning->>'version' AS INTEGER)
INTO _version;

IF _version != 1 THEN
RETURN NEXT 'The input partitioning is not of the supported version. Should be 1, but it is: ' || _version;
error_message := 'The input partitioning is not of the supported version. Should be 1, but it is: ' || _version;
RETURN NEXT;
END IF;

-- Checking whether the array 'keys' is valid, i.e. has unique, non-null elements.
@@ -94,55 +97,58 @@ BEGIN

SELECT array_agg(X.keys), count(1)
FROM (
SELECT DISTINCT(JAE.value) AS keys
FROM jsonb_array_elements_text(i_partitioning->'keys') AS JAE
WHERE JAE.value IS NOT NULL
) AS X
INTO _partitioning_keys_uniq_and_not_null, _partitioning_keys_uniq_and_not_null_cnt;

IF _partitioning_keys_all_cnt != _partitioning_keys_uniq_and_not_null_cnt THEN
RETURN NEXT 'The input partitioning is invalid, the keys must be unique and can not contain NULLs: '
error_message := 'The input partitioning is invalid, the keys must be unique and can not contain NULLs: '
|| (i_partitioning->>'keys');
RETURN NEXT;
END IF;

-- Checking whether the map 'keysToValues' has the same keys as the 'keys' attribute.
-- Checking whether the map 'keysToValuesMap' has the same keys as the 'keys' attribute.
IF i_strict_check THEN
SELECT array_agg(X.keys)
FROM (
SELECT jsonb_object_keys(i_partitioning->'keysToValues') AS keys
) AS X
SELECT jsonb_object_keys(i_partitioning->'keysToValuesMap') AS keys
) AS X
INTO _partitioning_keys_from_values_map;

IF NOT (
(_partitioning_keys_from_values_map @> _partitioning_keys_uniq_and_not_null)
AND (_partitioning_keys_from_values_map <@ _partitioning_keys_uniq_and_not_null)
) THEN

RETURN NEXT
'The input partitioning is invalid, the keys in ''keys'' and ''keysToValues'' do not correspond. '
|| 'Given in ''keysToValues'': '
|| _partitioning_keys_from_values_map::TEXT
|| ' vs (probably expected from ''keys''): '
|| _partitioning_keys_uniq_and_not_null::TEXT;
AND (_partitioning_keys_from_values_map <@ _partitioning_keys_uniq_and_not_null)
) THEN

error_message :=
'The input partitioning is invalid, the keys in ''keys'' and ''keysToValuesMap'' do not correspond. '
|| 'Given in ''keysToValuesMap'': '
|| _partitioning_keys_from_values_map::TEXT
|| ' vs (probably expected from ''keys''): '
|| _partitioning_keys_uniq_and_not_null::TEXT;
RETURN NEXT;
END IF;
END IF;

-- Checking the validity of values in the map 'keysToValues',
-- Checking the validity of values in the map 'keysToValuesMap',
-- non-pattern-like partitioning can't have null values there.
IF i_strict_check THEN
PERFORM 1
FROM jsonb_each_text(i_partitioning->'keysToValues') AS elem
FROM jsonb_each_text(i_partitioning->'keysToValuesMap') AS elem
WHERE elem.value IS NULL;

IF found THEN
RETURN NEXT 'The input partitioning is invalid, some values in ''keysToValues'' are NULLs: '
|| (i_partitioning->>'keysToValues');
error_message := 'The input partitioning is invalid, some values in ''keysToValuesMap'' are NULLs: '
|| (i_partitioning->>'keysToValuesMap');
RETURN NEXT;
END IF;
END IF;

RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE SECURITY DEFINER;

ALTER FUNCTION validation.validate_partitioning(JSONB, BOOL) OWNER TO atum_owner;
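For orientation, a hedged usage sketch of the reworked function: errors are now emitted through the `error_message` OUT column, so a caller reads them as ordinary result rows. The input is a placeholder modeled on the valid example in the header comment, with one key deliberately missing from `keysToValuesMap`.

```sql
-- Sketch: strict validation of a partitioning whose 'keysToValuesMap' lacks one of the declared keys.
-- Each returned row is one validation error; a fully valid input yields no rows.
SELECT error_message
FROM validation.validate_partitioning(
    '{
       "version": 1,
       "keys": ["one", "two", "three"],
       "keysToValuesMap": {"one": "DatasetA", "two": "Version1"}
     }'::JSONB,
    true
);
-- Expected, per the checks above: a single row reporting that the keys in 'keys'
-- and 'keysToValuesMap' do not correspond.
```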
@@ -15,8 +15,9 @@
*/

CREATE OR REPLACE FUNCTION validation.is_partitioning_valid(
IN i_partitioning JSONB,
IN i_strict_check BOOLEAN = true
IN i_partitioning JSONB,
IN i_strict_check BOOLEAN = true,
OUT is_valid BOOLEAN
) RETURNS BOOLEAN AS
$$
-------------------------------------------------------------------------------
@@ -41,18 +42,21 @@
--
-------------------------------------------------------------------------------
DECLARE

_errors TEXT;
_count INTEGER;
BEGIN
PERFORM validation.validate_partitioning(i_partitioning, i_strict_check)
LIMIT 1;
SELECT string_agg(VP.error_message, E',\n'), count(1)
FROM validation.validate_partitioning(i_partitioning, i_strict_check) VP
INTO _errors, _count;

IF found THEN
RAISE EXCEPTION 'The input partitioning is not valid: %', jsonb_pretty(i_partitioning);
IF _count > 0 THEN
RAISE EXCEPTION E'The input partitioning is not valid: %\nDue to issue(s):\n%', jsonb_pretty(i_partitioning), _errors ;
END IF;

RETURN TRUE;
is_valid := TRUE;
RETURN;
END;
$$
LANGUAGE plpgsql IMMUTABLE SECURITY DEFINER;

ALTER FUNCTION validation.is_partitioning_valid(JSONB, BOOL) OWNER TO atum_owner;
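And a matching sketch for the wrapper: it aggregates whatever `validate_partitioning` reports and raises an exception listing the issues, otherwise it returns `TRUE`. Placeholder values again follow the documented example.

```sql
-- Sketch: a well-formed partitioning returns TRUE.
SELECT validation.is_partitioning_valid(
    '{
       "version": 1,
       "keys": ["one", "two"],
       "keysToValuesMap": {"one": "DatasetA", "two": "Version1"}
     }'::JSONB,
    true
);

-- Sketch: an unsupported version now fails with the collected error messages, e.g.:
--   ERROR:  The input partitioning is not valid: { ... }
--   Due to issue(s):
--   The input partitioning is not of the supported version. Should be 1, but it is: 2
SELECT validation.is_partitioning_valid(
    '{"version": 2, "keys": ["one"], "keysToValuesMap": {"one": "DatasetA"}}'::JSONB,
    true
);
```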
@@ -39,7 +39,7 @@ class GetFlowCheckpointsIntegrationTests extends DBTestSuite {
|{
| "version": 1,
| "keys": ["keyX", "keyY", "keyZ"],
| "keysToValues": {
| "keysToValuesMap": {
| "keyX": "value1",
| "keyZ": "value3",
| "keyY": "value2"
@@ -34,7 +34,7 @@ class GetFlowPartitioningsIntegrationTests extends DBTestSuite {
|{
| "version": 1,
| "keys": ["keyA", "keyB", "keyC"],
| "keysToValues": {
| "keysToValuesMap": {
| "keyA": "valueA",
| "keyB": "valueB",
| "keyC": "valueC"
@@ -48,7 +48,7 @@ class GetFlowPartitioningsIntegrationTests extends DBTestSuite {
|{
| "version": 1,
| "keys": ["keyA", "keyB"],
| "keysToValues": {
| "keysToValuesMap": {
| "keyA": "valueA",
| "keyB": "valueB"
| }
@@ -61,7 +61,7 @@ class GetFlowPartitioningsIntegrationTests extends DBTestSuite {
|{
| "version": 1,
| "keys": ["keyD", "keyE", "keyF"],
| "keysToValues": {
| "keysToValuesMap": {
| "keyD": "valueD",
| "keyE": "valueE",
| "keyF": "valueF"
@@ -29,7 +29,7 @@ class CreateOrUpdateAdditionalDataIntegrationTests extends DBTestSuite {
|{
| "version": 1,
| "keys": ["key1", "key2", "key3"],
| "keysToValues": {
| "keysToValuesMap": {
| "key1": "valueA",
| "key2": "valueB",
| "key3": "valueC"

0 comments on commit e281c73
