Add CM Write Data Job Scenario to K6 Test Suite 91/140891/6
author sourabh_sourabh <sourabh.sourabh@est.tech>
Thu, 15 May 2025 15:46:58 +0000 (16:46 +0100)
committer sourabh_sourabh <sourabh.sourabh@est.tech>
Wed, 4 Jun 2025 12:01:51 +0000 (13:01 +0100)
- Added 'write_data_job_scenario' to the K6 test configuration for small
  and large data sizes.
- These scenarios simulate lower and higher load, so that system behaviour
  can be monitored under infrequent write operations.
- Aligned the CSV report and related output.
- Created a test metadata JSON that is referenced from all relevant places.
- Added a script to generate trend declarations and thresholds from that metadata.
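- For illustration: a metadata entry with metric 'cm_handle_id_search_no_filter',
  unit 'milliseconds' and kpiThreshold 2600 is expanded by the generator script
  into a trend declaration and a threshold entry along these lines (a sketch of
  the generated output, not hand-written code):

      export let cmHandleIdSearchNoFilterTrend = new Trend('cm_handle_id_search_no_filter', true);

  substituted for #METRICS-TRENDS-PLACE-HOLDER# in ncmp-test-runner.js, and

      "cm_handle_id_search_no_filter": ["avg <= 2600"]

  substituted for the "#SCENARIO-THRESHOLDS#" placeholder in kpi.json.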

Issue-ID: CPS-2716
Change-Id: I4ba5728a2738c3454f1652d6db88ad599f2192c9
Signed-off-by: sourabh_sourabh <sourabh.sourabh@est.tech>
12 files changed:
cps-ncmp-service/src/main/java/org/onap/cps/ncmp/impl/datajobs/WriteRequestExaminer.java
docker-compose/config/nginx/nginx.conf
k6-tests/make-logs.sh
k6-tests/ncmp/common/cmhandle-crud.js
k6-tests/ncmp/common/passthrough-crud.js
k6-tests/ncmp/common/utils.js
k6-tests/ncmp/common/write-data-job.js [new file with mode: 0644]
k6-tests/ncmp/config/endurance.json
k6-tests/ncmp/config/kpi.json
k6-tests/ncmp/config/test-kpi-metadata.json [new file with mode: 0644]
k6-tests/ncmp/ncmp-test-runner.js
k6-tests/ncmp/run-all-tests.sh

index 9cb3517..e6c3db8 100644 (file)
@@ -92,7 +92,7 @@ public class WriteRequestExaminer {
 
     private Map<String, YangModelCmHandle> preloadCmHandles(final DataJobWriteRequest dataJobWriteRequest) {
         final Collection<String> uniquePaths
-            = dataJobWriteRequest.data().stream().map(operation -> operation.path()).collect(Collectors.toSet());
+            = dataJobWriteRequest.data().stream().map(WriteOperation::path).collect(Collectors.toSet());
         final Collection<String> cmHandleIds
             = alternateIdMatcher.getCmHandleIdsByLongestMatchingAlternateIds(uniquePaths, PATH_SEPARATOR);
         final Collection<YangModelCmHandle> yangModelCmHandles
index e6eab37..cb0d770 100644 (file)
@@ -26,7 +26,7 @@ http {
     }
 
     # Set the max allowed size of the incoming request
-    client_max_body_size 2m;
+    client_max_body_size 25m;
 
     server {
         listen 80;
index f3343b6..56907fd 100644 (file)
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-SERVICE_NAMES=("cps-and-ncmp" "dbpostgresql")
-TIMESTAMP=$(date +"%Y%m%d%H%M%S")
-LOG_DIR="${WORKSPACE:-.}/logs"
+
+set -euo pipefail
+
+# Constants
+readonly LOG_DIR="${WORKSPACE:-.}/logs"
+readonly LOG_RETENTION_DAYS=14
+readonly TIMESTAMP=$(date +"%Y%m%d%H%M%S")
+readonly SERVICES_TO_BE_LOGGED=("cps-and-ncmp" "ncmp-dmi-plugin-demo-and-csit-stub" "dbpostgresql")
+
+# Ensure log directory exists
 mkdir -p "$LOG_DIR"
-# Store logs for each service's containers and zip them individually
-for SERVICE_NAME in "${SERVICE_NAMES[@]}"; do
-    TEMP_DIR="$LOG_DIR/temp_${SERVICE_NAME}_$TIMESTAMP"
-    ZIP_FILE="$LOG_DIR/logs_${SERVICE_NAME}_$TIMESTAMP.zip"
-    mkdir -p "$TEMP_DIR"
-    CONTAINER_IDS=$(docker ps --filter "name=$SERVICE_NAME" --format "{{.ID}}")
-    for CONTAINER_ID in $CONTAINER_IDS; do
-        CONTAINER_NAME=$(docker inspect --format="{{.Name}}" "$CONTAINER_ID" | sed 's/\///g')
-        LOG_FILE="$TEMP_DIR/${CONTAINER_NAME}_logs_$TIMESTAMP.log"
-        docker logs "$CONTAINER_ID" > "$LOG_FILE"
-    done
-    # Zip the logs for the current service
-    zip -r "$ZIP_FILE" "$TEMP_DIR"
-    echo "Logs for service $SERVICE_NAME saved to $ZIP_FILE"
-    # Clean temp files for the current service
-    rm -r "$TEMP_DIR"
+
+# Function to fetch logs from a container
+fetch_container_logs() {
+  local container_id="$1"
+  local container_name
+  container_name=$(docker inspect --format="{{.Name}}" "$container_id" | sed 's/\///g')
+  local log_file="$2/${container_name}_logs_$TIMESTAMP.log"
+  docker logs "$container_id" > "$log_file"
+}
+
+# Function to archive logs for a service
+archive_service_logs() {
+  local service_name="$1"
+  local temp_dir="$2"
+  local zip_file="$3"
+
+  mkdir -p "$temp_dir"
+
+  local container_ids
+  container_ids=$(docker ps --filter "name=$service_name" --format "{{.ID}}")
+
+  for container_id in $container_ids; do
+    fetch_container_logs "$container_id" "$temp_dir"
+  done
+
+  zip -r "$zip_file" "$temp_dir"
+  echo "Logs for service '$service_name' saved to $zip_file"
+
+  rm -r "$temp_dir"
+}
+
+# Main process
+for service_name in "${SERVICES_TO_BE_LOGGED[@]}"; do
+  temp_dir="$LOG_DIR/temp_${service_name}_$TIMESTAMP"
+  zip_file="$LOG_DIR/logs_${service_name}_$TIMESTAMP.zip"
+
+  archive_service_logs "$service_name" "$temp_dir" "$zip_file"
 done
-# Delete logs older than 2 weeks
-find "$LOG_DIR" -name "logs_*.zip" -mtime +14 -delete
\ No newline at end of file
+
+# Clean up old logs
+find "$LOG_DIR" -name "logs_*.zip" -mtime +$LOG_RETENTION_DAYS -delete
index 3b6c3ff..e36a1ab 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *  ============LICENSE_START=======================================================
- *  Copyright (C) 2024 Nordix Foundation
+ *  Copyright (C) 2024-2025 OpenInfra Foundation Europe. All rights reserved.
  *  ================================================================================
  *  Licensed under the Apache License, Version 2.0 (the "License");
  *  you may not use this file except in compliance with the License.
@@ -19,7 +19,8 @@
  */
 
 import { sleep } from 'k6';
-import { performPostRequest, NCMP_BASE_URL, DMI_PLUGIN_URL, TOTAL_CM_HANDLES, MODULE_SET_TAGS
+import {
+    performPostRequest, getAlternateId, NCMP_BASE_URL, DMI_PLUGIN_URL, TOTAL_CM_HANDLES, MODULE_SET_TAGS
 } from './utils.js';
 import { executeCmHandleIdSearch } from './search-base.js';
 
@@ -53,19 +54,20 @@ function createCmHandlePayload(cmHandleIds) {
         "dmiPlugin": DMI_PLUGIN_URL,
         "createdCmHandles": cmHandleIds.map((cmHandleId, index) => {
             // Ensure unique networkSegment within range 1-10
-            let networkSegmentId = Math.floor(Math.random() * 10) + 1; // Random between 1-10
+            let networkSegmentId = Math.floor(Math.random() * 10) + 1;
             let moduleTag = MODULE_SET_TAGS[index % MODULE_SET_TAGS.length];
 
             return {
                 "cmHandle": cmHandleId,
-                "alternateId": cmHandleId.replace('ch-', 'Region=NorthAmerica,Segment='),
+                "alternateId": getAlternateId(cmHandleId.replace('ch-', '')),
                 "moduleSetTag": moduleTag,
+                "dataProducerIdentifier": "some-data-producer-id",
                 "cmHandleProperties": {
                     "segmentId": index + 1,
-                    "networkSegment": `Region=NorthAmerica,Segment=${networkSegmentId}`, // Unique within range 1-10
-                    "deviceIdentifier": `Element=RadioBaseStation_5G_${index + 1000}`, // Unique per cmHandle
-                    "hardwareVersion": `HW-${moduleTag}`, // Shares uniqueness with moduleSetTag
-                    "softwareVersion": `Firmware_${moduleTag}`, // Shares uniqueness with moduleSetTag
+                    "networkSegment": `Region=NorthAmerica,Segment=${networkSegmentId}`,
+                    "deviceIdentifier": `Element=RadioBaseStation_5G_${index + 1000}`,
+                    "hardwareVersion": `HW-${moduleTag}`,
+                    "softwareVersion": `Firmware_${moduleTag}`,
                     "syncStatus": "ACTIVE",
                     "nodeCategory": "VirtualNode"
                 },
index c673257..e502e5d 100644 (file)
@@ -23,24 +23,24 @@ import {
     performGetRequest,
     NCMP_BASE_URL,
     LEGACY_BATCH_TOPIC_NAME,
-    getRandomCmHandleReference,
+    getRandomAlternateId,
 } from './utils.js';
 
-export function passthroughRead(useAlternateId) {
-    const cmHandleReference = getRandomCmHandleReference(useAlternateId);
+export function passthroughRead() {
+    const randomAlternateId = getRandomAlternateId();
     const resourceIdentifier = 'ManagedElement=NRNode1/GNBDUFunction=1';
     const datastoreName = 'ncmp-datastore:passthrough-operational';
     const includeDescendants = true;
-    const url = generatePassthroughUrl(cmHandleReference, datastoreName, resourceIdentifier, includeDescendants);
+    const url = generatePassthroughUrl(randomAlternateId, datastoreName, resourceIdentifier, includeDescendants);
     return performGetRequest(url, 'passthroughRead');
 }
 
-export function passthroughWrite(useAlternateId) {
-    const cmHandleReference = getRandomCmHandleReference(useAlternateId);
+export function passthroughWrite() {
+    const randomAlternateId = getRandomAlternateId();
     const resourceIdentifier = 'ManagedElement=NRNode1/GNBDUFunction=1';
     const datastoreName = 'ncmp-datastore:passthrough-running';
     const includeDescendants = false;
-    const url = generatePassthroughUrl(cmHandleReference, datastoreName, resourceIdentifier, includeDescendants);
+    const url = generatePassthroughUrl(randomAlternateId, datastoreName, resourceIdentifier, includeDescendants);
     const payload = JSON.stringify({
         "id": "123",
         "attributes": {"userLabel": "test"}
@@ -65,7 +65,8 @@ export function legacyBatchRead(cmHandleIds) {
     return performPostRequest(url, payload, 'batchRead');
 }
 
-function generatePassthroughUrl(cmHandleReference, datastoreName, resourceIdentifier, includeDescendants) {
+function generatePassthroughUrl(alternateId, datastoreName, resourceIdentifier, includeDescendants) {
+    const encodedAlternateId = encodeURIComponent(alternateId);
     const descendantsParam = includeDescendants ? `&include-descendants=${includeDescendants}` : '';
-    return `${NCMP_BASE_URL}/ncmp/v1/ch/${cmHandleReference}/data/ds/${datastoreName}?resourceIdentifier=${resourceIdentifier}${descendantsParam}`;
+    return `${NCMP_BASE_URL}/ncmp/v1/ch/${encodedAlternateId}/data/ds/${datastoreName}?resourceIdentifier=${resourceIdentifier}${descendantsParam}`;
 }
\ No newline at end of file
index 09144a7..ca3a651 100644 (file)
  *  ============LICENSE_END=========================================================
  */
 
-import { randomIntBetween } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
+import {randomIntBetween} from 'https://jslib.k6.io/k6-utils/1.2.0/index.js';
 import http from 'k6/http';
+import {check} from 'k6';
+import {Trend} from 'k6/metrics';
 
 export const testConfig = JSON.parse(open(`../config/${__ENV.TEST_PROFILE}.json`));
+export const testKpiMetaData = JSON.parse(open(`../config/test-kpi-metadata.json`));
 export const KAFKA_BOOTSTRAP_SERVERS = testConfig.hosts.kafkaBootstrapServer;
 export const NCMP_BASE_URL = testConfig.hosts.ncmpBaseUrl;
 export const DMI_PLUGIN_URL = testConfig.hosts.dmiStubUrl;
@@ -57,19 +60,26 @@ export function makeBatchOfCmHandleIds(batchSize, batchNumber) {
 export function makeRandomBatchOfAlternateIds() {
     const alternateIds = new Set();
     while (alternateIds.size < LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE) {
-        alternateIds.add(getRandomCmHandleReference(true));
+        alternateIds.add(getRandomAlternateId());
     }
     return Array.from(alternateIds)
 }
 
 /**
- * Generates a random CM Handle reference based on the provided flag.
- * @param useAlternateId
- * @returns {string} CM Handle reference representing a CM handle ID or an alternate ID.
+ * Generates a random CM Handle alternate ID.
+ *
+ * This function selects a random CM Handle ID between 1 and TOTAL_CM_HANDLES (inclusive)
+ * and returns its corresponding alternate ID by invoking `getAlternateId(id)`.
+ *
+ * @returns {string} A CM Handle alternate ID derived from a randomly selected CM Handle ID.
  */
-export function getRandomCmHandleReference(useAlternateId) {
-    const prefix = useAlternateId ? 'Region=NorthAmerica,Segment=' : 'ch-';
-    return `${prefix}${randomIntBetween(1, TOTAL_CM_HANDLES)}`;
+export function getRandomAlternateId() {
+    let randomCmHandleId = randomIntBetween(1, TOTAL_CM_HANDLES);
+    return getAlternateId(randomCmHandleId);
+}
+
+export function getAlternateId(cmHandleNumericId) {
+    return `/SubNetwork=Europe/SubNetwork=Ireland/MeContext=MyRadioNode${cmHandleNumericId}/ManagedElement=MyManagedElement${cmHandleNumericId}`;
 }
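+
+// Example (illustrative): getAlternateId(7) returns
+// '/SubNetwork=Europe/SubNetwork=Ireland/MeContext=MyRadioNode7/ManagedElement=MyManagedElement7'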
 
 /**
@@ -112,22 +122,17 @@ export function performGetRequest(url, metricTag) {
 export function makeCustomSummaryReport(testResults, scenarioConfig) {
     const summaryCsvLines = [
         '#,Test Name,Unit,Fs Requirement,Current Expectation,Actual',
-        makeSummaryCsvLine('0', 'HTTP request failures for all tests', 'rate of failed requests', 'http_req_failed', 0, testResults, scenarioConfig),
-        makeSummaryCsvLine('1', 'Registration of CM-handles', 'CM-handles/second', 'cmhandles_created_per_second', 100, testResults, scenarioConfig),
-        makeSummaryCsvLine('2', 'De-registration of CM-handles', 'CM-handles/second', 'cmhandles_deleted_per_second', 180, testResults, scenarioConfig),
-        makeSummaryCsvLine('3a', 'CM-handle ID search with No filter', 'milliseconds', 'id_search_nofilter_duration', 550, testResults, scenarioConfig),
-        makeSummaryCsvLine('3b', 'CM-handle ID search with Module filter', 'milliseconds', 'id_search_module_duration', 2300, testResults, scenarioConfig),
-        makeSummaryCsvLine('3c', 'CM-handle ID search with Property filter', 'milliseconds', 'id_search_property_duration', 1450, testResults, scenarioConfig),
-        makeSummaryCsvLine('3d', 'CM-handle ID search with Cps Path filter', 'milliseconds', 'id_search_cpspath_duration', 1500, testResults, scenarioConfig),
-        makeSummaryCsvLine('3e', 'CM-handle ID search with Trust Level filter', 'milliseconds', 'id_search_trustlevel_duration', 1600, testResults, scenarioConfig),
-        makeSummaryCsvLine('4a', 'CM-handle search with No filter', 'milliseconds', 'cm_search_nofilter_duration', 18000, testResults, scenarioConfig),
-        makeSummaryCsvLine('4b', 'CM-handle search with Module filter', 'milliseconds', 'cm_search_module_duration', 18000, testResults, scenarioConfig),
-        makeSummaryCsvLine('4c', 'CM-handle search with Property filter', 'milliseconds', 'cm_search_property_duration', 18000, testResults, scenarioConfig),
-        makeSummaryCsvLine('4d', 'CM-handle search with Cps Path filter', 'milliseconds', 'cm_search_cpspath_duration', 18000, testResults, scenarioConfig),
-        makeSummaryCsvLine('4e', 'CM-handle search with Trust Level filter', 'milliseconds', 'cm_search_trustlevel_duration', 18000, testResults, scenarioConfig),
-        makeSummaryCsvLine('5b', 'NCMP overhead for Synchronous single CM-handle pass-through read with alternate id', 'milliseconds', 'ncmp_overhead_passthrough_read_alt_id', 18, testResults, scenarioConfig),
-        makeSummaryCsvLine('6b', 'NCMP overhead for Synchronous single CM-handle pass-through write with alternate id', 'milliseconds', 'ncmp_overhead_passthrough_write_alt_id', 18, testResults, scenarioConfig),
-        makeSummaryCsvLine('7', 'Legacy batch read operation', 'events/second', 'legacy_batch_read_cmhandles_per_second', 200, testResults, scenarioConfig),
+        ...testKpiMetaData.map(test => {
+            return makeSummaryCsvLine(
+                test.label,
+                test.name,
+                test.unit,
+                test.metric,
+                test.cpsAverage,
+                testResults,
+                scenarioConfig
+            );
+        })
     ];
     return summaryCsvLines.join('\n') + '\n';
 }
@@ -139,3 +144,27 @@ function makeSummaryCsvLine(testCase, testName, unit, measurementName, currentEx
     const actualValue = testResults.metrics[measurementName].values[thresholdKey].toFixed(3);
     return `${testCase},${testName},${unit},${thresholdValue},${currentExpectation},${actualValue}`;
 }
+
+/**
+ * Handles the response by performing a check, logging errors if any, and recording overhead.
+ *
+ * @param {Object} response - The HTTP response object.
+ * @param {number} expectedStatus - The expected HTTP status code.
+ * @param {string} checkLabel - A descriptive label for the check.
+ * @param {number} delayMs - The predefined delay in milliseconds.
+ * @param {Trend} trendMetric - The Trend metric to record overhead.
+ */
+export function handleHttpResponse(response, expectedStatus, checkLabel, delayMs, trendMetric) {
+    const isSuccess = check(response, {
+        [checkLabel]: (responseObj) => responseObj.status === expectedStatus,
+    });
+
+    if (isSuccess) {
+        const overhead = response.timings.duration - delayMs;
+        trendMetric.add(overhead);
+    } else {
+        let responseBody = JSON.parse(response.body);
+        console.error(`${checkLabel} failed: Error response status: ${response.status}, message: ${responseBody.message}, details: ${responseBody.details}`);
+    }
+}
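+
+// Example usage (illustrative; mirrors the passthrough scenarios in ncmp-test-runner.js):
+//   handleHttpResponse(response, 200, 'passthrough read with alternate Id status equals 200',
+//       READ_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpReadOverheadTrend);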
+
diff --git a/k6-tests/ncmp/common/write-data-job.js b/k6-tests/ncmp/common/write-data-job.js
new file mode 100644 (file)
index 0000000..9da9206
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ *  ============LICENSE_START=======================================================
+ *  Copyright (C) 2025 OpenInfra Foundation Europe. All rights reserved.
+ *  ================================================================================
+ *  Licensed under the Apache License, Version 2.0 (the "License");
+ *  you may not use this file except in compliance with the License.
+ *  You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ *
+ *  SPDX-License-Identifier: Apache-2.0
+ *  ============LICENSE_END=========================================================
+ */
+
+import {crypto} from 'k6/experimental/webcrypto';
+import {performPostRequest, getRandomAlternateId, NCMP_BASE_URL} from './utils.js';
+
+/**
+ * Executes a write data job against the NCMP endpoint.
+ *
+ * @param {number} numberOfOperations - Total number of write operations to include in the job.
+ * @returns {*} The HTTP response from the POST request.
+ */
+export function executeWriteDataJob(numberOfOperations) {
+    const jobId = crypto.randomUUID();
+    const requestPayload = buildDataJobRequestPayload(numberOfOperations);
+
+    console.debug(`[WriteJob] Starting job → ID: ${jobId}, Operations: ${numberOfOperations}`);
+    return sendWriteDataJobRequest(jobId, requestPayload);
+}
+
+/**
+ * Sends a write data job request to the NCMP API endpoint.
+ *
+ * @param {string} jobId - The unique identifier for this write job.
+ * @param {Object} payload - The complete request body for the write operation.
+ * @returns {*} The response from the HTTP POST request.
+ */
+function sendWriteDataJobRequest(jobId, payload) {
+    const targetUrl = `${NCMP_BASE_URL}/do-not-use/dataJobs/${jobId}/write`;
+    const serializedPayload = JSON.stringify(payload);
+    return performPostRequest(targetUrl, serializedPayload, 'WriteDataJob');
+}
+
+/**
+ * Builds the full payload for a write data job.
+ *
+ * Each base operation set consists of two write operations:
+ *  - `add` at a nested child path
+ *  - `merge` at a nested grandchild path
+ *
+ * The structure returned matches the expected `DataJobRequest` format on the server side:
+ *
+ * Java-side representation:
+ * ```java
+ * public record DataJobRequest(
+ *   DataJobMetadata dataJobMetadata,
+ *   DataJobWriteRequest dataJobWriteRequest
+ * )
+ * ```
+ *
+ * @param {number} numberOfWriteOperations - Total number of write operations to generate (two per base set).
+ * @returns {{
+ *   dataJobMetadata: {
+ *     destination: string,
+ *     dataAcceptType: string,
+ *     dataContentType: string
+ *   },
+ *   dataJobWriteRequest: {
+ *     data: Array<{
+ *       path: string,
+ *       op: "add" | "merge",
+ *       operationId: string,
+ *       value: Object
+ *     }>
+ *   }
+ * }} Fully-formed data job request payload.
+ */
+function buildDataJobRequestPayload(numberOfWriteOperations) {
+    const operations = [];
+    for (let i = 1; i <= numberOfWriteOperations / 2; i++) {
+        const basePath = getRandomAlternateId();
+        operations.push(
+            {
+                path: `${basePath}/SomeChild=child-1`,
+                op: 'add',
+                operationId: `${i}-1`,
+                value: {
+                    key: `some-value-one-${i}`
+                }
+            },
+            {
+                path: `${basePath}/SomeChild=child-2/SomeGrandChild=grand-child-2`,
+                op: 'merge',
+                operationId: `${i}-2`,
+                value: {
+                    key: `some-value-two-${i}`
+                }
+            }
+        );
+    }
+    return {
+        dataJobMetadata: {
+            destination: "device/managed-element-collection",
+            dataAcceptType: "application/json",
+            dataContentType: "application/merge-patch+json"
+        },
+        dataJobWriteRequest: {
+            data: operations
+        }
+    };
+}
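+
+// Example (illustrative): executeWriteDataJob(2) builds one base set, i.e. a payload like:
+// {
+//   "dataJobMetadata": { "destination": "device/managed-element-collection",
+//                        "dataAcceptType": "application/json",
+//                        "dataContentType": "application/merge-patch+json" },
+//   "dataJobWriteRequest": { "data": [
+//     { "path": "<random alternate id>/SomeChild=child-1",
+//       "op": "add", "operationId": "1-1", "value": { "key": "some-value-one-1" } },
+//     { "path": "<random alternate id>/SomeChild=child-2/SomeGrandChild=grand-child-2",
+//       "op": "merge", "operationId": "1-2", "value": { "key": "some-value-two-1" } }
+//   ] }
+// }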
+
+
index 8f65b81..d9bdccb 100644 (file)
       "rate": 1,
       "timeUnit": "1s",
       "preAllocatedVUs": 1
+    },
+    "write_large_data_job_scenario": {
+      "executor": "constant-vus",
+      "exec": "writeDataJobLargeScenario",
+      "vus": 1,
+      "duration": "2h"
+    },
+    "write_small_data_job_scenario": {
+      "executor": "constant-vus",
+      "exec": "writeDataJobSmallScenario",
+      "vus": 10,
+      "duration": "2h"
     }
   }
 }
index dfe70cd..eee8398 100644 (file)
@@ -33,7 +33,7 @@
       "timeUnit": "2600ms",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "0ms"
+      "startTime": "142ms"
     },
     "cm_handle_id_search_module_scenario": {
       "executor": "constant-arrival-rate",
@@ -42,7 +42,8 @@
       "timeUnit": "2600ms",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "400ms"
+      "startTime": "613ms",
+      "maxVUs": 3
     },
     "cm_handle_id_search_property_scenario": {
       "executor": "constant-arrival-rate",
@@ -51,7 +52,7 @@
       "timeUnit": "2600ms",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "800ms"
+      "startTime": "1084ms"
     },
     "cm_handle_id_search_cpspath_scenario": {
       "executor": "constant-arrival-rate",
@@ -60,7 +61,7 @@
       "timeUnit": "2600ms",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "1200ms"
+      "startTime": "1555ms"
     },
     "cm_handle_id_search_trustlevel_scenario": {
       "executor": "constant-arrival-rate",
@@ -69,7 +70,7 @@
       "timeUnit": "2600ms",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "1600ms"
+      "startTime": "2026ms"
     },
     "cm_handle_search_nofilter_scenario": {
       "executor": "constant-arrival-rate",
@@ -78,7 +79,7 @@
       "timeUnit": "24s",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "0s"
+      "startTime": "497ms"
     },
     "cm_handle_search_module_scenario": {
       "executor": "constant-arrival-rate",
@@ -87,7 +88,7 @@
       "timeUnit": "24s",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "3s"
+      "startTime": "3568ms"
     },
     "cm_handle_search_property_scenario": {
       "executor": "constant-arrival-rate",
@@ -96,7 +97,7 @@
       "timeUnit": "24s",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "6s"
+      "startTime": "6639ms"
     },
     "cm_handle_search_cpspath_scenario": {
       "executor": "constant-arrival-rate",
       "timeUnit": "24s",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "9s"
+      "startTime": "9710ms"
     },
     "cm_handle_search_trustlevel_scenario": {
       "executor": "constant-arrival-rate",
       "timeUnit": "24s",
       "duration": "15m",
       "preAllocatedVUs": 2,
-      "startTime": "12s"
+      "startTime": "12781ms",
+      "maxVUs": 3
     },
     "legacy_batch_produce_scenario": {
       "executor": "constant-arrival-rate",
       "rate": 1,
       "preAllocatedVUs": 1,
       "timeUnit": "1s",
-      "duration": "15m"
+      "duration": "15m10s",
+      "maxVUs": 2,
+      "startTime": "71ms"
     },
     "legacy_batch_consume_scenario": {
       "executor": "per-vu-iterations",
       "exec": "legacyBatchConsumeScenario",
       "vus": 1,
       "iterations": 1,
-      "maxDuration": "16m"
+      "maxDuration": "16m",
+      "startTime": "71ms"
     },
     "produceCmAvcBackGroundLoadAtPeakRate": {
       "executor": "constant-arrival-rate",
       "duration": "15m",
       "preAllocatedVUs": 11,
       "maxVUs": 12,
-      "gracefulStop": "10s"
+      "gracefulStop": "10s",
+      "startTime": "0ms"
+    },
+    "write_large_data_job_scenario": {
+      "executor": "constant-arrival-rate",
+      "exec": "writeDataJobLargeScenario",
+      "rate": 1,
+      "timeUnit": "60s",
+      "duration": "15m",
+      "preAllocatedVUs": 1,
+      "startTime": "852ms"
+    },
+    "write_small_data_job_scenario": {
+      "executor": "constant-arrival-rate",
+      "exec": "writeDataJobSmallScenario",
+      "rate": 2,
+      "timeUnit": "1s",
+      "duration": "15m",
+      "preAllocatedVUs": 10,
+      "maxVUs": 12,
+      "startTime": "923ms"
     }
   },
-  "thresholds": {
-    "http_req_failed": ["rate == 0"],
-    "cmhandles_created_per_second": ["avg >= 22"],
-    "cmhandles_deleted_per_second": ["avg >= 22"],
-    "ncmp_overhead_passthrough_read_alt_id": ["avg <= 40"],
-    "ncmp_overhead_passthrough_write_alt_id": ["avg <= 40"],
-    "id_search_nofilter_duration": ["avg <= 2600"],
-    "id_search_module_duration": ["avg <= 2600"],
-    "id_search_property_duration": ["avg <= 2600"],
-    "id_search_cpspath_duration": ["avg <= 2600"],
-    "id_search_trustlevel_duration": ["avg <= 2600"],
-    "cm_search_nofilter_duration": ["avg <= 24000"],
-    "cm_search_module_duration": ["avg <= 24000"],
-    "cm_search_property_duration": ["avg <= 24000"],
-    "cm_search_cpspath_duration": ["avg <= 24000"],
-    "cm_search_trustlevel_duration": ["avg <= 24000"],
-    "legacy_batch_read_cmhandles_per_second": ["avg >= 150"]
-  }
+  "thresholds": "#SCENARIO-THRESHOLDS#"
 }
diff --git a/k6-tests/ncmp/config/test-kpi-metadata.json b/k6-tests/ncmp/config/test-kpi-metadata.json
new file mode 100644 (file)
index 0000000..17e3ec3
--- /dev/null
@@ -0,0 +1,146 @@
+[
+  {
+    "label": "0",
+    "name": "HTTP request failures for all tests",
+    "unit": "rate of failed requests",
+    "metric": "http_req_failed",
+    "cpsAverage": 0.00,
+    "kpiThreshold": 0.01
+  },
+  {
+    "label": "1",
+    "name": "Registration of CM-handles",
+    "unit": "CM-handles/second",
+    "metric": "cm_handles_created",
+    "cpsAverage": 100,
+    "kpiThreshold": 22
+  },
+  {
+    "label": "2",
+    "name": "De-registration of CM-handles",
+    "unit": "CM-handles/second",
+    "metric": "cm_handles_deleted",
+    "cpsAverage": 180,
+    "kpiThreshold": 22
+  },
+  {
+    "label": "3a",
+    "name": "CM-handle ID search with No filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_id_search_no_filter",
+    "cpsAverage": 550,
+    "kpiThreshold": 2600
+  },
+  {
+    "label": "3b",
+    "name": "CM-handle ID search with Module filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_id_search_module_filter",
+    "cpsAverage": 2300,
+    "kpiThreshold": 2600
+  },
+  {
+    "label": "3c",
+    "name": "CM-handle ID search with Property filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_id_search_property_filter",
+    "cpsAverage": 1450,
+    "kpiThreshold": 2600
+  },
+  {
+    "label": "3d",
+    "name": "CM-handle ID search with Cps Path filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_id_search_cps_path_filter",
+    "cpsAverage": 1500,
+    "kpiThreshold": 2600
+  },
+  {
+    "label": "3e",
+    "name": "CM-handle ID search with Trust Level filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_id_search_trust_level_filter",
+    "cpsAverage": 1600,
+    "kpiThreshold": 2600
+  },
+  {
+    "label": "4a",
+    "name": "CM-handle search with No filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_search_no_filter",
+    "cpsAverage": 18000,
+    "kpiThreshold": 24000
+  },
+  {
+    "label": "4b",
+    "name": "CM-handle search with Module filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_search_module_filter",
+    "cpsAverage": 18000,
+    "kpiThreshold": 24000
+  },
+  {
+    "label": "4c",
+    "name": "CM-handle search with Property filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_search_property_filter",
+    "cpsAverage": 18000,
+    "kpiThreshold": 24000
+  },
+  {
+    "label": "4d",
+    "name": "CM-handle search with Cps Path filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_search_cps_path_filter",
+    "cpsAverage": 18000,
+    "kpiThreshold": 24000
+  },
+  {
+    "label": "4e",
+    "name": "CM-handle search with Trust Level filter",
+    "unit": "milliseconds",
+    "metric": "cm_handle_search_trust_level_filter",
+    "cpsAverage": 18000,
+    "kpiThreshold": 24000
+  },
+  {
+    "label": "5b",
+    "name": "NCMP overhead for Synchronous single CM-handle pass-through read with alternate id",
+    "unit": "milliseconds",
+    "metric": "ncmp_read_overhead",
+    "cpsAverage": 18,
+    "kpiThreshold": 40
+  },
+  {
+    "label": "6b",
+    "name": "NCMP overhead for Synchronous single CM-handle pass-through write with alternate id",
+    "unit": "milliseconds",
+    "metric": "ncmp_write_overhead",
+    "cpsAverage": 18,
+    "kpiThreshold": 40
+  },
+  {
+    "label": "7",
+    "name": "Legacy batch read operation",
+    "unit": "events/second",
+    "metric": "legacy_batch_read",
+    "cpsAverage": 200,
+    "kpiThreshold": 150
+  },
+  {
+    "label": "8",
+    "name": "Write data job scenario - small",
+    "unit": "milliseconds",
+    "metric": "dcm_write_data_job_small",
+    "cpsAverage": 100,
+    "kpiThreshold": 500
+  },
+  {
+    "label": "9",
+    "name": "Write data job scenario - large",
+    "unit": "milliseconds",
+    "metric": "dcm_write_data_job_large",
+    "cpsAverage": 8000,
+    "kpiThreshold": 30000
+  }
+]
\ No newline at end of file
index 5049bba..a3c30d8 100644 (file)
@@ -25,28 +25,17 @@ import {
     TOTAL_CM_HANDLES, READ_DATA_FOR_CM_HANDLE_DELAY_MS, WRITE_DATA_FOR_CM_HANDLE_DELAY_MS,
     makeCustomSummaryReport, makeBatchOfCmHandleIds, makeRandomBatchOfAlternateIds,
     LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE, REGISTRATION_BATCH_SIZE,
-    KAFKA_BOOTSTRAP_SERVERS, LEGACY_BATCH_TOPIC_NAME, CONTAINER_UP_TIME_IN_SECONDS, testConfig
+    KAFKA_BOOTSTRAP_SERVERS, LEGACY_BATCH_TOPIC_NAME, CONTAINER_UP_TIME_IN_SECONDS, testConfig, handleHttpResponse
 } from './common/utils.js';
 import { createCmHandles, deleteCmHandles, waitForAllCmHandlesToBeReady } from './common/cmhandle-crud.js';
 import { executeCmHandleSearch, executeCmHandleIdSearch } from './common/search-base.js';
 import { passthroughRead, passthroughWrite, legacyBatchRead } from './common/passthrough-crud.js';
 import { sendBatchOfKafkaMessages } from './common/produce-avc-event.js';
+import { executeWriteDataJob } from "./common/write-data-job.js";
 
-let cmHandlesCreatedPerSecondTrend = new Trend('cmhandles_created_per_second', false);
-let cmHandlesDeletedPerSecondTrend = new Trend('cmhandles_deleted_per_second', false);
-let passthroughReadNcmpOverheadTrendWithAlternateId = new Trend('ncmp_overhead_passthrough_read_alt_id', true);
-let passthroughWriteNcmpOverheadTrendWithAlternateId = new Trend('ncmp_overhead_passthrough_write_alt_id', true);
-let idSearchNoFilterDurationTrend = new Trend('id_search_nofilter_duration', true);
-let idSearchModuleDurationTrend = new Trend('id_search_module_duration', true);
-let idSearchPropertyDurationTrend = new Trend('id_search_property_duration', true);
-let idSearchCpsPathDurationTrend = new Trend('id_search_cpspath_duration', true);
-let idSearchTrustLevelDurationTrend = new Trend('id_search_trustlevel_duration', true);
-let cmSearchNoFilterDurationTrend = new Trend('cm_search_nofilter_duration', true);
-let cmSearchModuleDurationTrend = new Trend('cm_search_module_duration', true);
-let cmSearchPropertyDurationTrend = new Trend('cm_search_property_duration', true);
-let cmSearchCpsPathDurationTrend = new Trend('cm_search_cpspath_duration', true);
-let cmSearchTrustLevelDurationTrend = new Trend('cm_search_trustlevel_duration', true);
-let legacyBatchReadCmHandlesPerSecondTrend = new Trend('legacy_batch_read_cmhandles_per_second', false);
+#METRICS-TRENDS-PLACE-HOLDER#
+
+const EXPECTED_WRITE_RESPONSE_COUNT = 1;
 
 export const legacyBatchEventReader = new Reader({
     brokers: [KAFKA_BOOTSTRAP_SERVERS],
@@ -67,7 +56,7 @@ export function setup() {
     for (let batchNumber = 0; batchNumber < TOTAL_BATCHES; batchNumber++) {
         const nextBatchOfCmHandleIds = makeBatchOfCmHandleIds(REGISTRATION_BATCH_SIZE, batchNumber);
         const response = createCmHandles(nextBatchOfCmHandleIds);
-        check(response, { 'create CM-handles status equals 200': (r) => r.status === 200 });
+        check(response, { 'create CM-handles status equals 200': (response) => response.status === 200 });
     }
 
     waitForAllCmHandlesToBeReady();
@@ -75,7 +64,7 @@ export function setup() {
     const endTimeInMillis = Date.now();
     const totalRegistrationTimeInSeconds = (endTimeInMillis - startTimeInMillis) / 1000.0;
 
-    cmHandlesCreatedPerSecondTrend.add(TOTAL_CM_HANDLES / totalRegistrationTimeInSeconds);
+    cmHandlesCreatedTrend.add(TOTAL_CM_HANDLES / totalRegistrationTimeInSeconds);
 }
 
 export function teardown() {
@@ -89,117 +78,129 @@ export function teardown() {
         if (response.error_code === 0) {
               DEREGISTERED_CM_HANDLES += REGISTRATION_BATCH_SIZE
         }
-        check(response, { 'delete CM-handles status equals 200': (r) => r.status === 200 });
+        check(response, { 'delete CM-handles status equals 200': (response) => response.status === 200 });
     }
 
     const endTimeInMillis = Date.now();
     const totalDeregistrationTimeInSeconds = (endTimeInMillis - startTimeInMillis) / 1000.0;
 
-    cmHandlesDeletedPerSecondTrend.add(DEREGISTERED_CM_HANDLES / totalDeregistrationTimeInSeconds);
+    cmHandlesDeletedTrend.add(DEREGISTERED_CM_HANDLES / totalDeregistrationTimeInSeconds);
 
     sleep(CONTAINER_UP_TIME_IN_SECONDS);
 }
 
 export function passthroughReadAltIdScenario() {
-    const response = passthroughRead(true);
-    if (check(response, { 'passthrough read with alternate Id status equals 200': (r) => r.status === 200 })) {
-        const overhead = response.timings.duration - READ_DATA_FOR_CM_HANDLE_DELAY_MS;
-        passthroughReadNcmpOverheadTrendWithAlternateId.add(overhead);
-    }
+    const response = passthroughRead();
+    handleHttpResponse(response, 200, 'passthrough read with alternate Id status equals 200',
+        READ_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpReadOverheadTrend);
 }
 
 export function passthroughWriteAltIdScenario() {
-    const response = passthroughWrite(true);
-    if (check(response, { 'passthrough write with alternate Id status equals 201': (r) => r.status === 201 })) {
-        const overhead = response.timings.duration - WRITE_DATA_FOR_CM_HANDLE_DELAY_MS;
-        passthroughWriteNcmpOverheadTrendWithAlternateId.add(overhead);
-    }
+    const response = passthroughWrite();
+    handleHttpResponse(response, 201, 'passthrough write with alternate Id status equals 201',
+        WRITE_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpWriteOverheadTrend);
 }
 
 export function cmHandleIdSearchNoFilterScenario() {
     const response = executeCmHandleIdSearch('no-filter');
-    if (check(response, { 'CM handle ID no-filter search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle ID no-filter search returned the correct number of ids': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        idSearchNoFilterDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle ID no-filter search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle ID no-filter search returned the correct number of ids': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleIdSearchNoFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleSearchNoFilterScenario() {
     const response = executeCmHandleSearch('no-filter');
-    if (check(response, { 'CM handle no-filter search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle no-filter search returned expected CM-handles': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        cmSearchNoFilterDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle no-filter search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle no-filter search returned expected CM-handles': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleSearchNoFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleIdSearchModuleScenario() {
     const response = executeCmHandleIdSearch('module');
-    if (check(response, { 'CM handle ID module search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle ID module search returned the correct number of ids': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        idSearchModuleDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle ID module search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle ID module search returned the correct number of ids': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleIdSearchModuleFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleSearchModuleScenario() {
     const response = executeCmHandleSearch('module');
-    if (check(response, { 'CM handle module search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle module search returned expected CM-handles': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        cmSearchModuleDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle module search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle module search returned expected CM-handles': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleSearchModuleFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleIdSearchPropertyScenario() {
     const response = executeCmHandleIdSearch('property');
-    if (check(response, { 'CM handle ID property search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle ID property search returned the correct number of ids': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        idSearchPropertyDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle ID property search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle ID property search returned the correct number of ids': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleIdSearchPropertyFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleSearchPropertyScenario() {
     const response = executeCmHandleSearch('property');
-    if (check(response, { 'CM handle property search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle property search returned expected CM-handles': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        cmSearchPropertyDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle property search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle property search returned expected CM-handles': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleSearchPropertyFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleIdSearchCpsPathScenario() {
     const response = executeCmHandleIdSearch('cps-path-for-ready-cm-handles');
-    if (check(response, { 'CM handle ID cps path search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle ID cps path search returned the correct number of ids': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        idSearchCpsPathDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle ID cps path search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle ID cps path search returned the correct number of ids': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleIdSearchCpsPathFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleSearchCpsPathScenario() {
     const response = executeCmHandleSearch('cps-path-for-ready-cm-handles');
-    if (check(response, { 'CM handle cps path search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle cps path search returned expected CM-handles': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        cmSearchCpsPathDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle cps path search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle cps path search returned expected CM-handles': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleSearchCpsPathFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleIdSearchTrustLevelScenario() {
     const response = executeCmHandleIdSearch('trust-level');
-    if (check(response, { 'CM handle ID trust level search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle ID trust level search returned the correct number of cm handle references': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        idSearchTrustLevelDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle ID trust level search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle ID trust level search returned the correct number of cm handle references': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleIdSearchTrustLevelFilterTrend.add(response.timings.duration);
     }
 }
 
 export function cmHandleSearchTrustLevelScenario() {
     const response = executeCmHandleSearch('trust-level');
-    if (check(response, { 'CM handle trust level search status equals 200': (r) => r.status === 200 })
-     && check(response, { 'CM handle trust level search returned expected CM-handles': (r) => r.json('#') === TOTAL_CM_HANDLES })) {
-        cmSearchTrustLevelDurationTrend.add(response.timings.duration);
+    if (check(response, { 'CM handle trust level search status equals 200': (response) => response.status === 200 })
+     && check(response, { 'CM handle trust level search returned expected CM-handles': (response) => response.json('#') === TOTAL_CM_HANDLES })) {
+        cmHandleSearchTrustLevelFilterTrend.add(response.timings.duration);
     }
 }
 
 export function legacyBatchProduceScenario() {
     const nextBatchOfAlternateIds = makeRandomBatchOfAlternateIds();
     const response = legacyBatchRead(nextBatchOfAlternateIds);
-    check(response, { 'data operation batch read status equals 200': (r) => r.status === 200 });
+    check(response, { 'data operation batch read status equals 200': (response) => response.status === 200 });
+}
+
+export function writeDataJobLargeScenario() {
+    const response = executeWriteDataJob(100000);
+    if (check(response, {'large writeDataJob response status is 200': (response) => response.status === 200})
+        && check(response, {'large writeDataJob received expected number of responses': (response) => response.json('#') === EXPECTED_WRITE_RESPONSE_COUNT})) {
+        dcmWriteDataJobLargeTrend.add(response.timings.duration);
+    }
+}
+
+export function writeDataJobSmallScenario() {
+    const response = executeWriteDataJob(100);
+    if (check(response, {'small writeDataJob response status is 200': (response) => response.status === 200})
+        && check(response, {'small writeDataJob received expected number of responses': (response) => response.json('#') === EXPECTED_WRITE_RESPONSE_COUNT})) {
+        dcmWriteDataJobSmallTrend.add(response.timings.duration);
+    }
 }
 
 export function produceAvcEventsScenario() {
@@ -209,23 +210,43 @@ export function produceAvcEventsScenario() {
 export function legacyBatchConsumeScenario() {
     // calculate total messages 15 minutes times 60 seconds times
     const TOTAL_MESSAGES_TO_CONSUME = 15 * 60 * LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE;
+    console.log("šŸ“„ [legacy batch consume scenario] Starting consumption of", TOTAL_MESSAGES_TO_CONSUME, "messages...");
+
     try {
         let messagesConsumed = 0;
-        let startTime = Date.now();
+        const startTime = Date.now();
 
         while (messagesConsumed < TOTAL_MESSAGES_TO_CONSUME) {
-            let messages = legacyBatchEventReader.consume({ limit: LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE });
-            if (messages.length > 0) {
-                messagesConsumed += messages.length;
+            try {
+                const messages = legacyBatchEventReader.consume({
+                    limit: LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE,
+                    timeout: 30000,
+                });
+
+                if (messages.length > 0) {
+                    messagesConsumed += messages.length;
+                    console.debug(`✅ Consumed ${messages.length} messages by legacy batch read (total: ${messagesConsumed}/${TOTAL_MESSAGES_TO_CONSUME})`);
+                } else {
+                    console.warn("⚠️ No messages received by legacy batch read.");
+                }
+            } catch (err) {
+                console.error(`āŒ Consume error (legacy batch read): ${err.message}`);
             }
         }
 
-        let endTime = Date.now();
+        const endTime = Date.now();
         const timeToConsumeMessagesInSeconds = (endTime - startTime) / 1000.0;
-        legacyBatchReadCmHandlesPerSecondTrend.add(TOTAL_MESSAGES_TO_CONSUME / timeToConsumeMessagesInSeconds);
+
+        if (messagesConsumed > 0) {
+            legacyBatchReadTrend.add(messagesConsumed / timeToConsumeMessagesInSeconds);
+            console.log(`šŸ Finished (legacy batch read): Consumed ${messagesConsumed} messages in ${timeToConsumeMessagesInSeconds.toFixed(2)}s.`);
+        } else {
+            legacyBatchReadCmhandlesPerSecondTrend.add(0);
+            console.error("āš ļø No messages consumed by legacy read batch.");
+        }
     } catch (error) {
-        legacyBatchReadCmHandlesPerSecondTrend.add(0);
-        console.error(error);
+        legacyBatchReadTrend.add(0);
+        console.error("šŸ’„ Legacy batch read scenario failed:", error.message);
     }
 }
 
index 780713e..c7197b6 100755 (executable)
 # limitations under the License.
 #
 
-pushd "$(dirname "$0")" >/dev/null || exit 1
+# ───────────────────────────────────────────────────────────────────────
+# 📁 Navigate to Script Directory
+# ───────────────────────────────────────────────────────────────────────
+pushd "$(dirname "$0")" >/dev/null || { echo "❌ Failed to access script directory. Exiting."; exit 1; }
 
+# ───────────────────────────────────────────────────────────────────────
+# 📌 Global Variables
+# ───────────────────────────────────────────────────────────────────────
 number_of_failures=0
 testProfile=$1
 summaryFile="${testProfile}Summary.csv"
-echo "Running $testProfile performance tests..."
+KPI_METADATA_FILE="./config/test-kpi-metadata.json"
+KPI_CONFIG_FILE="./config/kpi.json"
+NCMP_RUNNER_FILE="ncmp-test-runner.js"
 
-k6 run ncmp-test-runner.js -e TEST_PROFILE="$testProfile"  > "$summaryFile" || ((number_of_failures++))
+echo
+echo "šŸ“¢ Running NCMP K6 performance test for profile: [$testProfile]"
+echo
 
+# ───────────────────────────────────────────────────────────────────────
+# 1️⃣ Generate trend declarations and thresholds from metadata
+# ───────────────────────────────────────────────────────────────────────
+echo "🔧 Generating trend declarations and thresholds from [$KPI_METADATA_FILE], updating [$NCMP_RUNNER_FILE] and [$KPI_CONFIG_FILE]..."
+
+read -r -d '' jq_script << 'EOF'
+def toCamelCase:
+  split("_") as $parts |
+  ($parts[0]) + ($parts[1:] | map((.[0:1] | ascii_upcase) + .[1:]) | join(""));
+
+reduce .[] as $item (
+  { trends: [], thresholds: {} };
+  if ($item.unit == "milliseconds") or ($item.unit | test("/second")) then
+    .trends += [
+      "export let \($item.metric | toCamelCase)Trend = new Trend('\($item.metric)', \($item.unit == "milliseconds"));"
+    ]
+  else
+    .
+  end
+  |
+  .thresholds[$item.metric] = (
+    if $item.metric == "http_req_failed" then
+      ["rate <= \($item.kpiThreshold)"]
+    elif ($item.unit | test("/second")) then
+      ["avg >= \($item.kpiThreshold)"]
+    else
+      ["avg <= \($item.kpiThreshold)"]
+    end
+  )
+)
+EOF
+
+# Execute jq script
+jq_output=$(jq -r "$jq_script" "$KPI_METADATA_FILE")
+
+# Extract trends and thresholds
+trend_declarations=$(echo "$jq_output" | jq -r '.trends[]')
+thresholds_json=$(echo "$jq_output" | jq '.thresholds')
+
+# Replace placeholder in runner with generated trends
+TMP_FILE=$(mktemp)
+awk -v trends="$trend_declarations" '
+  BEGIN { replaced=0 }
+  {
+    if ($0 ~ /#METRICS-TRENDS-PLACE-HOLDER#/ && replaced == 0) {
+      print trends
+      replaced=1
+    } else {
+      print $0
+    }
+  }
+' "$NCMP_RUNNER_FILE" > "$TMP_FILE"
+mv "$TMP_FILE" "$NCMP_RUNNER_FILE"
+echo "āœ… Trend declarations inserted."
+
+# Update thresholds in KPI config
+TMP_FILE=$(mktemp)
+cp "$KPI_CONFIG_FILE" "$TMP_FILE"
+jq --argjson thresholds "$thresholds_json" '
+  .thresholds = $thresholds
+' "$TMP_FILE" | jq '.' > "$KPI_CONFIG_FILE"
+rm -f "$TMP_FILE"
+echo "āœ… Threshold block has been injected into [$KPI_CONFIG_FILE]"
+echo
+
+# ───────────────────────────────────────────────────────────────────────
+# 2️⃣ Run K6 and Capture Output
+# ───────────────────────────────────────────────────────────────────────
+k6 run ncmp-test-runner.js -e TEST_PROFILE="$testProfile" > "$summaryFile"
+k6_exit_code=$?
+
+case $k6_exit_code in
+  0) echo "āœ… K6 executed successfully for profile: [$testProfile]." ;;
+  99) echo "āš ļø  K6 thresholds failed (exit code 99). Processing failures..." ;;
+  *) echo "āŒ K6 execution error (exit code $k6_exit_code)."; ((number_of_failures++)) ;;
+esac
+
+# ───────────────────────────────────────────────────────────────────────
+# 3️⃣ Extract and Filter Summary Data
+# ───────────────────────────────────────────────────────────────────────
 if [ -f "$summaryFile" ]; then
+  echo "šŸ” Extracting expected test names from metadata..."
+  expected_tests=()
+  while IFS= read -r test_name; do
+    [[ -n "$test_name" ]] && expected_tests+=("$test_name")
+  done < <(jq -r '.[].name' "$KPI_METADATA_FILE")
+
+  if [[ ${#expected_tests[@]} -eq 0 ]]; then
+    echo "āŒ No test names found in metadata. Aborting."
+    exit 1
+  fi
+
+  filtered_summary=$(mktemp)
+
+  # Extract the CSV header line starting with '#'
+  grep -m 1 "^#" "$summaryFile" > "$filtered_summary"
+
+  # Match each expected test name with summary rows
+  for test_name in "${expected_tests[@]}"; do
+    trimmedTestName="$(echo "$test_name" | xargs)"
+    matched_line=$(grep -F "$trimmedTestName" "$summaryFile")
+    [[ -n "$matched_line" ]] && echo "$matched_line" >> "$filtered_summary" || echo "āš ļø Result not found for [$trimmedTestName]"
+  done
 
   # Output raw CSV for plotting job
-  echo "-- BEGIN CSV REPORT"
-  cat "$summaryFile"
-  echo "-- END CSV REPORT"
+  echo -e "\nšŸ“Š -- BEGIN CSV REPORT --"
+  cat "$filtered_summary"
+  echo -e "šŸ“Š -- -- END CSV REPORT --\n"
+
+  # ───────────────────────────────────────────────────────────────────────
+  # 4️⃣ Evaluate FS Thresholds
+  # ───────────────────────────────────────────────────────────────────────
+
+  # Evaluate FS pass/fail thresholds
+  annotated_summary=$(mktemp)
+  threshold_failures=0
+
+  # Append header with new column "Pass FS"
+  head -n 1 "$filtered_summary" | awk '{print $0",Pass FS"}' > "$annotated_summary"
+  tail -n +2 "$filtered_summary" > tmp_input
+
+  # Exit early if no valid test results were found in the filtered summary
+  if [[ ! -s tmp_input ]]; then
+    echo "āš ļø No valid test results found in [$summaryFile]. Skipping FS evaluation."
+    echo "āŒ Summary: No tests were executed or matched expected names."
+    ((number_of_failures++))
+    rm -f tmp_input "$summaryFile" "$filtered_summary"
+    popd >/dev/null || true
+    exit $number_of_failures
+  fi
+
+  # Process each test case (skip header and check values) append pass/fail to annotated_summary
+  while IFS=, read -r id test_name unit fs_requirement current_expected actual_value; do
+    [[ -z "$test_name" ]] && continue
+
+    # Trim whitespace from fs_requirement and actual
+    fs_req=$(echo "$fs_requirement" | xargs)
+    actual_val=$(echo "$actual_value" | xargs)
+    fs_pass_status="āœ…"
+
+    # Special case: zero actual is valid, assign ✅ without warning
+    if [[ "$test_name" == "HTTP request failures for all tests" ]]; then
+      if [[ "$actual_val" != "0" && "$actual_val" != "0.000" ]]; then
+        fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a <= r) ? 1 : 0 }')
+        [[ "$fs_condition_met" -ne 1 ]] && fs_pass_status="❌" && ((threshold_failures++))
+      fi
+    else
+
+      # For all other tests: if actual is 0 or 0.000, mark as ❌ failure
+      if [[ "$actual_val" == "0" || "$actual_val" == "0.000" ]]; then
+        fs_pass_status="❌"
+        echo "❌ Error: Actual value for metric '$test_name' is 0. This may indicate an error or missing data."
+        ((threshold_failures++))
+      else
+        if [[ "$unit" == *"millisecond"* || "$unit" == *"rate of failed requests"* ]]; then
+          fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a <= r) ? 1 : 0 }')
+        else
+          fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a >= r) ? 1 : 0 }')
+        fi
+        [[ "$fs_condition_met" -ne 1 ]] && fs_pass_status="āŒ" && ((threshold_failures++))
+      fi
+    fi
+
+    echo "$id,$test_name,$unit,$fs_requirement,$current_expected,$actual_value,$fs_pass_status" >> "$annotated_summary"
+  done < tmp_input
+  rm -f tmp_input
+
+  # ───────────────────────────────────────────────────────────────────────
+  # 5️⃣ Print Human-Readable Report
+  # ───────────────────────────────────────────────────────────────────────
+  table_preview=$(column -t -s, "$annotated_summary")
+
+  # Compute table width safely
+  table_width=$(echo "$table_preview" | awk '{ if (length > max) max = length } END { print max }')
+
+  # Fallback if table_width is empty or not a valid number
+  if ! [[ "$table_width" =~ ^[0-9]+$ ]]; then
+    table_width=80
+  fi
+
+  # Now safely create the border line
+  border_line=$(printf '#%.0s' $(seq 1 "$table_width"))
+
+  format_title_spaced() {
+    local input="$1"
+    local result=""
+    for word in $input; do
+      for ((i=0; i<${#word}; i++)); do
+        result+="${word:$i:1} "
+      done
+      result+="  "
+    done
+    echo "$result"
+  }
+
+  # Pad title string to center it in the table width
+  raw_title="K6 ${testProfile^^} PERFORMANCE TEST RESULTS"
+
+  # Dynamically center title within the line
+  title="$(format_title_spaced "$raw_title")"
+  title_line=$(printf "## %*s %*s##" \
+    $(( (table_width - 6 + ${#title}) / 2 )) "$title" \
+    $(( (table_width - 6 - ${#title}) / 2 )) "")
+
+  # Print header
+  echo "$border_line"
+  echo "$title_line"
+  echo "$border_line"
+
+  # Then print the table
+  echo "$table_preview"
+
+  # Print closing border after the table
+  echo "$border_line"
   echo
 
-  # Output human-readable report
-  echo "####################################################################################################"
-  if [ "$testProfile" = "kpi" ]; then
-    echo "##            K 6     K P I       P E R F O R M A N C E   T E S T   R E S U L T S                  ##"
+  # 🎯 Final FS Summary of threshold result
+  if (( threshold_failures > 0 )); then
+    echo "❌ Summary: [$threshold_failures] test(s) failed FS requirements."
+    ((number_of_failures++))
   else
-    echo "##            K 6   E N D U R A N C E      P E R F O R M A N C E   T E S T   R E S U L T S         ##"
+    echo "āœ… All tests passed FS requirements."
   fi
-  echo "####################################################################################################"
-  column -t -s, "$summaryFile"
-  echo
 
-  # Clean up
-  rm -f "$summaryFile"
+  # Cleanup temp files
+  rm -f "$summaryFile" "$filtered_summary" "$annotated_summary"
 
-else
-  echo "Error: Failed to generate $summaryFile" >&2
+else  # no summary file
+  echo "āŒ Error: Summary file [$summaryFile] was not generated. Possible K6 failure."
   ((number_of_failures++))
 fi
 
-popd >/dev/null || exit 1
-
-echo "NCMP TEST FAILURES: $number_of_failures"
-exit $number_of_failures
+# ───────────────────────────────────────────────────────────────────────
+# 🔚 Final Exit
+# ───────────────────────────────────────────────────────────────────────
+popd >/dev/null || true
+exit $number_of_failures
\ No newline at end of file
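
Usage note (a sketch, assuming the existing kpi and endurance profiles): the
script takes the test profile as its first argument, matching a config file
under k6-tests/ncmp/config:

    ./run-all-tests.sh kpi
    ./run-all-tests.sh endurance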