csit/env.properties
csit/archives/
-/k6-tests/image/
-k6-tests/ncmp/config/kpi-scenario-execution-definition.json
-/k6-tests/ncmp/*Summary.csv
-/k6-tests/ncmp/scenario-javascript.js
\ No newline at end of file
+/k6-tests/logs/
+/k6-tests/image/
\ No newline at end of file
- name: internal
port: {{ .Values.kafka.externalService.ports.internal }}
targetPort: {{ .Values.kafka.externalService.ports.internal }}
- nodePort: {{ .Values.kafka.externalService.nodePorts.internal }}
- name: external
port: {{ .Values.kafka.externalService.ports.external }}
targetPort: {{ .Values.kafka.externalService.ports.external }}
internal: 9092
external: 9093
nodePorts:
- external: 30093 # Node port for external communication
+ external: 30093
zookeeperConnect: "cps-cps-and-ncmp-zookeeper.default.svc.cluster.local"
brokerId: 1
listeners: "INTERNAL://0.0.0.0:29092,EXTERNAL://0.0.0.0:9092,NODEPORT://0.0.0.0:9093"
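+# Assumed listener roles: INTERNAL for inter-broker traffic, EXTERNAL for
+# in-cluster clients, and NODEPORT for out-of-cluster access via the external
+# node port (30093) configured above.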
import {Trend} from 'k6/metrics';
export const TEST_PROFILE = __ENV.TEST_PROFILE ? __ENV.TEST_PROFILE : 'kpi'
-export const testConfig = JSON.parse(open(`../config/${TEST_PROFILE}-scenario-execution-definition.json`));
-export const testKpiMetaData = JSON.parse(open(`../config/scenario-metadata.json`));
+export const testConfig = JSON.parse(open(`../config/${TEST_PROFILE}.json`));
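+// Example (illustrative): running k6 with `-e TEST_PROFILE=endurance` loads
+// '../config/endurance.json'; with no flag, TEST_PROFILE defaults to 'kpi'
+// and '../config/kpi.json' is used.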
export const KAFKA_BOOTSTRAP_SERVERS = testConfig.hosts.kafkaBootstrapServer;
export const NCMP_BASE_URL = testConfig.hosts.ncmpBaseUrl;
export const DMI_PLUGIN_URL = testConfig.hosts.dmiStubUrl;
return http.get(url, {tags: metricTags});
}
-export function makeCustomSummaryReport(metrics, thresholds) {
+export function makeCustomSummaryReport(testResults, scenarioConfig) {
const summaryCsvLines = [
'#,Test Name,Unit,Fs Requirement,Current Expectation,Actual',
- ...testKpiMetaData.map(test => {
- return makeSummaryCsvLine(
- test.label,
- test.name,
- test.unit,
- test.metric,
- test.cpsAverage,
- metrics,
- thresholds
- );
- })
+ makeSummaryCsvLine('0', 'HTTP request failures for all tests', 'rate of failed requests', 'http_req_failed', 0, testResults, scenarioConfig),
+ makeSummaryCsvLine('1', 'Registration of CM-handles', 'CM-handles/second', 'cm_handles_created', 100, testResults, scenarioConfig),
+ makeSummaryCsvLine('2', 'De-registration of CM-handles', 'CM-handles/second', 'cm_handles_deleted', 180, testResults, scenarioConfig),
+ makeSummaryCsvLine('3a', 'CM-handle ID search with No filter', 'milliseconds', 'cm_handle_id_search_no_filter', 550, testResults, scenarioConfig),
+ makeSummaryCsvLine('3b', 'CM-handle ID search with Module filter', 'milliseconds', 'cm_handle_id_search_module_filter', 2300, testResults, scenarioConfig),
+ makeSummaryCsvLine('3c', 'CM-handle ID search with Property filter', 'milliseconds', 'cm_handle_id_search_property_filter', 1450, testResults, scenarioConfig),
+ makeSummaryCsvLine('3d', 'CM-handle ID search with Cps Path filter', 'milliseconds', 'cm_handle_id_search_cps_path_filter', 1500, testResults, scenarioConfig),
+ makeSummaryCsvLine('3e', 'CM-handle ID search with Trust Level filter', 'milliseconds', 'cm_handle_id_search_trust_level_filter', 1600, testResults, scenarioConfig),
+ makeSummaryCsvLine('4a', 'CM-handle search with No filter', 'milliseconds', 'cm_handle_search_no_filter', 18000, testResults, scenarioConfig),
+ makeSummaryCsvLine('4b', 'CM-handle search with Module filter', 'milliseconds', 'cm_handle_search_module_filter', 18000, testResults, scenarioConfig),
+ makeSummaryCsvLine('4c', 'CM-handle search with Property filter', 'milliseconds', 'cm_handle_search_property_filter', 18000, testResults, scenarioConfig),
+ makeSummaryCsvLine('4d', 'CM-handle search with Cps Path filter', 'milliseconds', 'cm_handle_search_cps_path_filter', 18000, testResults, scenarioConfig),
+ makeSummaryCsvLine('4e', 'CM-handle search with Trust Level filter', 'milliseconds', 'cm_handle_search_trust_level_filter', 18000, testResults, scenarioConfig),
+ makeSummaryCsvLine('5b', 'NCMP overhead for Synchronous single CM-handle pass-through read with alternate id', 'milliseconds', 'ncmp_read_overhead', 18, testResults, scenarioConfig),
+ makeSummaryCsvLine('6b', 'NCMP overhead for Synchronous single CM-handle pass-through write with alternate id', 'milliseconds', 'ncmp_write_overhead', 18, testResults, scenarioConfig),
+ makeSummaryCsvLine('7', 'Legacy batch read operation', 'events/second', 'legacy_batch_read', 200, testResults, scenarioConfig),
+ makeSummaryCsvLine('8', 'Write data job scenario - small', 'milliseconds', 'dcm_write_data_job_small', 100, testResults, scenarioConfig),
+ makeSummaryCsvLine('9', 'Write data job scenario - large', 'milliseconds', 'dcm_write_data_job_large', 8000, testResults, scenarioConfig),
];
return summaryCsvLines.join('\n') + '\n';
}
-function makeSummaryCsvLine(testNumber, testName, unit, measurementName, currentExpectation, metrics, thresholds) {
- const thresholdCondition = JSON.parse(JSON.stringify(thresholds[measurementName]))[0];
- const [metricsFunction, thresholdOperator, thresholdValue] = thresholdCondition.split(/\s+/);
- const actualValue = metrics[measurementName].values[metricsFunction].toFixed(3);
+function makeSummaryCsvLine(testNumber, testName, unit, measurementName, currentExpectation, testResults, scenarioConfig) {
+    const thresholdString = scenarioConfig.thresholds[measurementName][0];
+ const [thresholdKey, thresholdOperator, thresholdValue] = thresholdString.split(/\s+/);
+ const actualValue = testResults.metrics[measurementName].values[thresholdKey].toFixed(3);
return `${testNumber},${testName},${unit},${thresholdValue},${currentExpectation},${actualValue}`;
}
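+// Worked example (illustrative): for measurementName 'cm_handle_id_search_no_filter',
+// the threshold entry ["avg <= 2600"] splits into thresholdKey 'avg',
+// thresholdOperator '<=' and thresholdValue '2600'; the actual value is then
+// read from testResults.metrics['cm_handle_id_search_no_filter'].values.avg.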
"duration": "2h"
}
}
-}
+}
\ No newline at end of file
"startTime": "923ms"
}
},
- "thresholds": "#THRESHOLDS-PLACEHOLDER#"
-}
+ "thresholds": {
+ "http_req_failed": ["rate <= 0.01"],
+ "cm_handles_created": ["avg >= 22"],
+ "cm_handles_deleted": ["avg >= 22"],
+ "cm_handle_id_search_no_filter": ["avg <= 2600"],
+ "cm_handle_id_search_module_filter": ["avg <= 2600"],
+ "cm_handle_id_search_property_filter": ["avg <= 2600"],
+ "cm_handle_id_search_cps_path_filter": ["avg <= 2600"],
+ "cm_handle_id_search_trust_level_filter": ["avg <= 2600"],
+ "cm_handle_search_no_filter": ["avg <= 24000"],
+ "cm_handle_search_module_filter": ["avg <= 24000"],
+ "cm_handle_search_property_filter": ["avg <= 24000"],
+ "cm_handle_search_cps_path_filter": ["avg <= 24000"],
+ "cm_handle_search_trust_level_filter": ["avg <= 24000"],
+ "ncmp_read_overhead": ["avg <= 40"],
+ "ncmp_write_overhead": ["avg <= 40"],
+ "legacy_batch_read": ["avg >= 150"],
+ "dcm_write_data_job_small": ["avg <= 500"],
+ "dcm_write_data_job_large": ["avg <= 30000"]
+ }
+}
\ No newline at end of file
+++ /dev/null
-[
- {
- "label": "0",
- "name": "HTTP request failures for all tests",
- "unit": "rate of failed requests",
- "metric": "http_req_failed",
- "cpsAverage": 0.00,
- "kpiThreshold": 0.01
- },
- {
- "label": "1",
- "name": "Registration of CM-handles",
- "unit": "CM-handles/second",
- "metric": "cm_handles_created",
- "cpsAverage": 100,
- "kpiThreshold": 22
- },
- {
- "label": "2",
- "name": "De-registration of CM-handles",
- "unit": "CM-handles/second",
- "metric": "cm_handles_deleted",
- "cpsAverage": 180,
- "kpiThreshold": 22
- },
- {
- "label": "3a",
- "name": "CM-handle ID search with No filter",
- "unit": "milliseconds",
- "metric": "cm_handle_id_search_no_filter",
- "cpsAverage": 550,
- "kpiThreshold": 2600
- },
- {
- "label": "3b",
- "name": "CM-handle ID search with Module filter",
- "unit": "milliseconds",
- "metric": "cm_handle_id_search_module_filter",
- "cpsAverage": 2300,
- "kpiThreshold": 2600
- },
- {
- "label": "3c",
- "name": "CM-handle ID search with Property filter",
- "unit": "milliseconds",
- "metric": "cm_handle_id_search_property_filter",
- "cpsAverage": 1450,
- "kpiThreshold": 2600
- },
- {
- "label": "3d",
- "name": "CM-handle ID search with Cps Path filter",
- "unit": "milliseconds",
- "metric": "cm_handle_id_search_cps_path_filter",
- "cpsAverage": 1500,
- "kpiThreshold": 2600
- },
- {
- "label": "3e",
- "name": "CM-handle ID search with Trust Level filter",
- "unit": "milliseconds",
- "metric": "cm_handle_id_search_trust_level_filter",
- "cpsAverage": 1600,
- "kpiThreshold": 2600
- },
- {
- "label": "4a",
- "name": "CM-handle search with No filter",
- "unit": "milliseconds",
- "metric": "cm_handle_search_no_filter",
- "cpsAverage": 18000,
- "kpiThreshold": 24000
- },
- {
- "label": "4b",
- "name": "CM-handle search with Module filter",
- "unit": "milliseconds",
- "metric": "cm_handle_search_module_filter",
- "cpsAverage": 18000,
- "kpiThreshold": 24000
- },
- {
- "label": "4c",
- "name": "CM-handle search with Property filter",
- "unit": "milliseconds",
- "metric": "cm_handle_search_property_filter",
- "cpsAverage": 18000,
- "kpiThreshold": 24000
- },
- {
- "label": "4d",
- "name": "CM-handle search with Cps Path filter",
- "unit": "milliseconds",
- "metric": "cm_handle_search_cps_path_filter",
- "cpsAverage": 18000,
- "kpiThreshold": 24000
- },
- {
- "label": "4e",
- "name": "CM-handle search with Trust Level filter",
- "unit": "milliseconds",
- "metric": "cm_handle_search_trust_level_filter",
- "cpsAverage": 18000,
- "kpiThreshold": 24000
- },
- {
- "label": "5b",
- "name": "NCMP overhead for Synchronous single CM-handle pass-through read with alternate id",
- "unit": "milliseconds",
- "metric": "ncmp_read_overhead",
- "cpsAverage": 18,
- "kpiThreshold": 40
- },
- {
- "label": "6b",
- "name": "NCMP overhead for Synchronous single CM-handle pass-through write with alternate id",
- "unit": "milliseconds",
- "metric": "ncmp_write_overhead",
- "cpsAverage": 18,
- "kpiThreshold": 40
- },
- {
- "label": "7",
- "name": "Legacy batch read operation",
- "unit": "events/second",
- "metric": "legacy_batch_read",
- "cpsAverage": 200,
- "kpiThreshold": 150
- },
- {
- "label": "8",
- "name": "Write data job scenario - small",
- "unit": "milliseconds",
- "metric": "dcm_write_data_job_small",
- "cpsAverage": 100,
- "kpiThreshold": 500
- },
- {
- "label": "9",
- "name": "Write data job scenario - large",
- "unit": "milliseconds",
- "metric": "dcm_write_data_job_large",
- "cpsAverage": 8000,
- "kpiThreshold": 30000
- }
-]
+++ /dev/null
-#!/bin/bash
-#
-# Copyright 2025 OpenInfra Foundation Europe. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# This script is used to generate K6 performance testing 'thresholds' declarations file by:
-# 1) Extracting thresholds from metric metadata JSON (scenario-metadata.json)
-# 2) Injects them into a scenario execution template file (scenario-execution-definition.tmpl) and later writes
-# it into config file named as {performance-test-profile-name}-scenario-execution-definition.json.
-# Note: {performance-test-profile-name} : There are two test profiles that can be run with either: kpi or endurance.
-#
-
-# Path to the JSON file containing metric metadata.
-# This JSON holds metric names, units, and threshold values.
-SCENARIO_METADATA_FILE="./config/scenario-metadata.json"
-
-# Scenario JSON template file for scenario execution configuration.
-# Contains placeholder (#THRESHOLDS-PLACEHOLDER#) to be replaced with actual threshold values.
-SCENARIO_CONFIG_TEMPLATE_FILE="./templates/scenario-execution-definition.tmpl"
-
-# Final json scenario execution configuration file with thresholds injected.
-SCENARIO_CONFIG_OUTPUT_FILE="./config/kpi-scenario-execution-definition.json"
-
-# ─────────────────────────────────────────────────────────────
-# Function: create_thresholds
-# Description:
-# Prepares threshold expressions for each metric.
-# Parameters:
-# $1 - Path to the metric metadata JSON file. (scenario-metadata.json)
-# Returns:
-# JSON object array containing thresholds for each metric.
-# ─────────────────────────────────────────────────────────────
-create_thresholds() {
- local scenario_metadata_json_file="$1"
-
- # Define the jq script to build the thresholds JSON object
- read -r -d '' thresholds_per_metric_as_json << 'EOF'
- # Set threshold expression based on metric type.
- reduce .[] as $metric (
- {};
- .[$metric.metric] = (
- if $metric.metric == "http_req_failed" then
- ["rate <= \($metric.kpiThreshold)"] # For failure rate metric, threshold is rate <= value
- elif ($metric.unit | test("/second")) then
- ["avg >= \($metric.kpiThreshold)"] # For per-second metrics, expect average >= threshold
- else
- ["avg <= \($metric.kpiThreshold)"] # Otherwise, average <= threshold
- end
- )
- )
-EOF
-
- # This returns a JSON object with:
- # - 'thresholds': array of JS declaration strings
- jq -r "$thresholds_per_metric_as_json" "$scenario_metadata_json_file"
-}
-
-# ─────────────────────────────────────────────────────────────
-# Function: inject_thresholds_into_scenario-execution
-# Description:
-# Injects the extracted threshold JSON object into the scenario
-# configuration template by replacing the `.thresholds` named property.
-# Parameters:
-# $1 - JSON string of threshold mappings. (scenario-metadata.json)
-# $2 - Template scenario config file path. (scenario-execution-definition.tmpl)
-# $3 - Output scenario config file path (kpi-scenario-execution-definition.json)
-# Returns:
-# Writes the updated JSON to output file. (kpi-scenario-execution-definition.json)
-# ─────────────────────────────────────────────────────────────
-inject_thresholds_into_scenario_execution_config() {
- local thresholds_json="$1"
- local scenario_execution_template_file="$2"
- local scenario_execution_output_file="$3"
-
- # Use jq to overwrite the `.thresholds` property in the template with the generated thresholds JSON
- jq --argjson thresholds "$thresholds_json" '.thresholds = $thresholds' "$scenario_execution_template_file" | jq '.' > "$scenario_execution_output_file"
-}
-
-# ─────────────────────────────────────────────────────────────
-# Main script execution starts here
-# ─────────────────────────────────────────────────────────────
-
-# Inform user script is starting threshold generation
-echo "Generating thresholds from [$SCENARIO_METADATA_FILE]..."
-
-# Calling function to extract threshold JSON object from metric metadata JSON file
-scenario_execution_thresholds_json=$(create_thresholds "$SCENARIO_METADATA_FILE")
-
-# Inject the extracted thresholds json block into the scenario config template and write into output file
-inject_thresholds_into_scenario_execution_config "$scenario_execution_thresholds_json" "$SCENARIO_CONFIG_TEMPLATE_FILE" "$SCENARIO_CONFIG_OUTPUT_FILE"
-
-# Final confirmation message on successful injection
-echo "Threshold block has been injected into [$SCENARIO_CONFIG_OUTPUT_FILE]"
\ No newline at end of file
+++ /dev/null
-#!/bin/bash
-#
-# Copyright 2025 OpenInfra Foundation Europe. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# This script is used to generate K6 performance testing 'Trend' declarations file by:
-# 1) Extracting trends from metric metadata JSON (scenario-metadata.json)
-# 2) Injects them into a JavaScript template file (scenario-javascript.tmpl) and then write it into final JavaScript
-# file named as scenario-javascript.json.
-#
-
-# Path to the JSON file that contains metric definitions (name, unit, threshold, etc.)
-# This JSON holds metric names, units, and threshold values.
-SCENARIO_METADATA_FILE="./config/scenario-metadata.json"
-
-# Path to the JS template file where the placeholder `#METRICS-TRENDS-PLACEHOLDER#` exists.
-# This is where the generated trend declarations will be inserted.
-SCENARIO_JAVASCRIPT_TEMPLATE_FILE="./templates/scenario-javascript.tmpl"
-
-# Output JavaScript file where the final result (with inserted trend declarations) will be saved.
-SCENARIO_JAVASCRIPT_OUTPUT_FILE="scenario-javascript.js"
-
-# ─────────────────────────────────────────────────────────────────────────────────
-# Function: create_trend_declarations
-# Description:
-# Converts metrics from the metadata JSON into JavaScript `Trend` declarations.
-# These declarations are used for K6 performance testing reports.
-# Parameters:
-# $1 - Accept the path to the metric metadata file as input. (scenario-metadata.json)
-# Returns:
-# trend declarations as JSON array object
-# ─────────────────────────────────────────────────────────────────────────────────
-
-create_trend_declarations() {
- local scenario_metadata_json_file="$1"
-
- # Read and assign a JQ script to a here-document variable. (trend_declarations)
- # `-r` makes jq output raw strings and `-d ''` lets us use multiline input.
- read -r -d '' trend_declarations << 'EOF'
- # Define a helper function (toCamelCase) that converts metric names from snake_case to camelCase
- # (for JS variable names for example: cm_handles_deleted → cmHandlesDeleted)
- def toCamelCase:
- split("_") as $parts | # Split string by "_"
- ($parts[0]) + # Keep first part as it is
- ($parts[1:] | map((.[0:1] | ascii_upcase) + .[1:]) # Capitalize rest of each word
- | join("")); # Join all parts into one string
-
- # Loop through each metric item and generate a JavaScript `Trend` declaration if unit matches.
- .[] # Iterate through array
- | select((.unit == "milliseconds") or (.unit | test("/second"))) # Select based on valid units
- | "export let \(.metric | toCamelCase)Trend = new Trend('\(.metric)', \(.unit == "milliseconds"));"
- # Output javascript declaration string: `export let abcTrend = new Trend('abc', true/false);`
-EOF
- # Execute the jq script on the metadata file to generate the trend declarations
- jq -r "$trend_declarations" "$scenario_metadata_json_file"
-}
-
-# ─────────────────────────────────────────────────────────────
-# Function: inject_trends_into_js_template
-# Description:
-# Replaces the placeholder line `#METRICS-TRENDS-PLACEHOLDER#` in the template
-# file with actual JS trend declarations.
-# Parameters:
-# $1 - JSON string of threshold mappings. Trend declaration strings.
-# for example: export let abcTrend = new Trend('abc', true), from scenario-metadata.json)
-# $2 - Template scenario javascript file path. (scenario-javascript.tmpl)
-# $3 - Output scenario script file path (scenario-javascript.js)
-# Returns:
-# Writes the updated JSON to output file. (scenario-javascript.js)
-# ─────────────────────────────────────────────────────────────
-inject_trends_into_javascript_template() {
- local trend_declarations="$1"
- local scenario_javascript_template_file="$2"
- local scenario_javascript_output_file="$3"
-
- # Use awk to replace the placeholder line with trend declarations
- awk -v trends="$trend_declarations" ' # Pass trends into awk variable
- {
- if ($0 ~ /#METRICS-TRENDS-PLACEHOLDER#/) {
- print trends # Print the trend declarations instead of the placeholder
- } else {
- print $0 # Otherwise, print the original line
- }
- }
- ' "$scenario_javascript_template_file" > "$scenario_javascript_output_file" # Save the transformed content into the output JS file
-}
-
-# ─────────────────────────────────────────────────────────────
-# Main Execution Starts Here
-# ─────────────────────────────────────────────────────────────
-
-# Display log message to inform that generation has started
-echo "Generating trend declarations from [$SCENARIO_METADATA_FILE]..."
-
-# Calling trend generation function
-scenario_javascript_trend_declarations=$(create_trend_declarations "$SCENARIO_METADATA_FILE")
-
-# Inject the generated trends into the JavaScript template and write it into scenario output file
-inject_trends_into_javascript_template "$scenario_javascript_trend_declarations" "$SCENARIO_JAVASCRIPT_TEMPLATE_FILE" "$SCENARIO_JAVASCRIPT_OUTPUT_FILE"
-
-# Final confirmation message to indicate success
-echo "Trend declarations inserted into [$SCENARIO_JAVASCRIPT_OUTPUT_FILE]"
\ No newline at end of file
# ─────────────────────────────────────────────────────────────
# 📌 Global Variables
# ─────────────────────────────────────────────────────────────
-number_of_failures=0
+threshold_failures=0
testProfile=$1
summaryFile="${testProfile}Summary.csv"
-# Path to the JSON file containing metric metadata.
-# This JSON holds metric names, units, and threshold values.
-SCENARIO_METADATA_FILE="./config/scenario-metadata.json"
-
-echo
-echo "📢 Running NCMP K6 performance test for profile: [$testProfile]"
-echo
-
-chmod +x ./create-scenario-javascript.sh
-source ./create-scenario-javascript.sh
-
-if [[ "$testProfile" == "kpi" ]]; then
- chmod +x ./create-scenario-execution-definition.sh
- source ./create-scenario-execution-definition.sh
-fi
+echo "Running $testProfile performance tests..."
# ─────────────────────────────────────────────────────────────
# Run K6 and Capture Output
+# '$?' immediately captures the exit code after k6 finishes
+# and assigns it to k6_exit_code.
# ─────────────────────────────────────────────────────────────
-k6 run scenario-javascript.js -e TEST_PROFILE="$testProfile" > "$summaryFile"
+k6 run ncmp-test-runner.js --quiet -e TEST_PROFILE="$testProfile" > "$summaryFile"
k6_exit_code=$?
case $k6_exit_code in
- 0) echo "✅ K6 executed successfully for profile: [$testProfile]." ;;
- 99) echo "⚠️ K6 thresholds failed (exit code 99). Processing failures..." ;;
- *) echo "❌ K6 execution error (exit code $k6_exit_code)."; number_of_failures=$((number_of_failures + 1)) ;;
+ 0) echo "✅ K6 executed successfully for profile: [$testProfile]" ;;
+ 99) echo "⚠️ K6 thresholds failed (exit code 99)" ;;
+ *) echo "❌ K6 execution error (exit code $k6_exit_code)";;
esac
-if [[ "$testProfile" == "kpi" ]]; then
-# ─────────────────────────────────────────────────────────────
-# Extract and Filter Summary Data
-# ─────────────────────────────────────────────────────────────
-if [ -f "$summaryFile" ]; then
- echo "🔍 Extracting expected test names from metadata..."
- expected_tests=()
- while IFS= read -r test_name; do
- [[ -n "$test_name" ]] && expected_tests+=("$test_name")
- done < <(jq -r '.[].name' "$SCENARIO_METADATA_FILE")
-
- if [[ ${#expected_tests[@]} -eq 0 ]]; then
- echo "❌ No test names found in metadata. Aborting."
- exit 1
- fi
-
- filtered_summary=$(mktemp)
-
- # Extract the CSV header line starting with '#'
- grep -m 1 "^#" "$summaryFile" > "$filtered_summary"
+###############################################################################
+# Appends a "Result" column (✅ / ❌) to the given summary file and increments
+# the global variable `threshold_failures` for each ❌ row.
+# NR == 1 matches only the header row, where the "Result" heading is appended.
+# PASS rules:
+#   • Throughput tests #1, #2, #7: PASS when Actual ≥ Fs Requirement
+#   • All other (duration) tests: PASS when Actual ≤ Fs Requirement
+###############################################################################
+addResultColumn() {
+ local summaryFile="$1"
+ local tmp
+ tmp=$(mktemp)
+
+ awk -F',' -v OFS=',' '
+ NR == 1 { print $0, "Result"; next }
+ {
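+        # Columns: $1 = test number, $4 = "Fs Requirement", $6 = "Actual";
+        # the +0 below coerces the CSV fields to numbers for comparison.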
+ throughputTests = ($1 == "1" || $1 == "2" || $1 == "7")
+ passCondition = throughputTests ? (($6+0) >= ($4+0)) : (($6+0) <= ($4+0))
+ print $0, (passCondition ? "✅" : "❌")
+ }
+ ' "$summaryFile" > "$tmp"
+
+ mv "$tmp" "$summaryFile"
+
+ # how many failures (❌) occurred?
+ local newFails
+ newFails=$(grep -c '❌' "$summaryFile")
+ threshold_failures=$(( threshold_failures + newFails ))
+}
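+# Worked example (hypothetical row): "1,Registration of CM-handles,CM-handles/second,22,100,95.000"
+# is throughput test #1, so it passes (Actual 95.000 ≥ Fs Requirement 22) and
+# gains a trailing ",✅".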
- # Match each expected test name with summary rows
- for test_name in "${expected_tests[@]}"; do
- trimmedTestName="$(echo "$test_name" | xargs)"
- matched_line=$(grep -F "$trimmedTestName" "$summaryFile")
- [[ -n "$matched_line" ]] && echo "$matched_line" >> "$filtered_summary" || echo "⚠️ Result not found for [$trimmedTestName]"
- done
+if [ -f "$summaryFile" ]; then
# Output raw CSV for plotting job
- echo -e "\n📊 -- BEGIN CSV REPORT --"
- cat "$filtered_summary"
- echo -e "📊 -- -- END CSV REPORT --\n"
-
- # ─────────────────────────────────────────────────────────────
- # Evaluate FS Thresholds
- # ─────────────────────────────────────────────────────────────
-
- # Evaluate FS pass/fail thresholds
- annotated_summary=$(mktemp)
- threshold_failures=0
-
- # Append header with new column "Pass FS"
- head -n 1 "$filtered_summary" | awk '{print $0",Pass FS"}' > "$annotated_summary"
- tail -n +2 "$filtered_summary" > tmp_input
-
- # Exit early if no valid test results were found in the filtered summary
- if [[ ! -s tmp_input ]]; then
- echo "⚠️ No valid test results found in [$summaryFile]. Skipping FS evaluation."
- echo "❌ Summary: No tests were executed or matched expected names."
- ((number_of_failures++))
- rm -f tmp_input "$summaryFile" "$filtered_summary"
- popd >/dev/null || true
- exit $number_of_failures
- fi
-
- # Process each test case (skip header and check values) append pass/fail to annotated_summary
- while IFS=, read -r id test_name unit fs_requirement current_expected actual_value; do
- [[ -z "$test_name" ]] && continue
-
- # Trim whitespace from fs_requirement and actual
- fs_req=$(echo "$fs_requirement" | xargs)
- actual_val=$(echo "$actual_value" | xargs)
- fs_pass_status="✅"
-
- # Special case: zero actual is valid, assign ✅ without warning
- if [[ "$test_name" == "HTTP request failures for all tests" ]]; then
- if [[ "$actual_val" != "0" && "$actual_val" != "0.000" ]]; then
- fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a <= r) ? 1 : 0 }')
- [[ "$fs_condition_met" -ne 1 ]] && fs_pass_status="❌" && ((threshold_failures++))
- fi
- else
-
- # For all other tests: if actual is 0 or 0.000, mark as ❌ failure
- if [[ "$actual_val" == "0" || "$actual_val" == "0.000" ]]; then
- fs_pass_status="❌"
- echo "❌ Error: Actual value for metric '$test_name' is 0. This may indicate an error or missing data."
- ((threshold_failures++))
- else
- if [[ "$unit" == *"millisecond"* || "$unit" == *"rate of failed requests"* ]]; then
- fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a <= r) ? 1 : 0 }')
- else
- fs_condition_met=$(awk -v a="$actual_val" -v r="$fs_req" 'BEGIN { print (a >= r) ? 1 : 0 }')
- fi
- [[ "$fs_condition_met" -ne 1 ]] && fs_pass_status="❌" && ((threshold_failures++))
- fi
- fi
-
- echo "$id,$test_name,$unit,$fs_requirement,$current_expected,$actual_value,$fs_pass_status" >> "$annotated_summary"
- done < tmp_input
- rm -f tmp_input
-
- # ─────────────────────────────────────────────────────────────
- # Print Human-Readable Report
- # ─────────────────────────────────────────────────────────────
- table_preview=$(column -t -s, "$annotated_summary")
-
- # Compute table width safely
- table_width=$(echo "$table_preview" | awk '{ if (length > max) max = length } END { print max }')
+ echo "-- BEGIN CSV REPORT"
+ cat "$summaryFile"
+ echo "-- END CSV REPORT"
+ echo
- # Fallback if table_width is empty or not a valid number
- if ! [[ "$table_width" =~ ^[0-9]+$ ]]; then
- table_width=80
+ # Output human-readable report
+ echo "####################################################################################################"
+ if [ "$testProfile" = "kpi" ]; then
+ echo "## K 6 K P I P E R F O R M A N C E T E S T R E S U L T S ##"
fi
+ echo "####################################################################################################"
+ addResultColumn "$summaryFile"
+ column -t -s, "$summaryFile"
+ echo
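+    # Illustrative output of 'column -t -s,' (hypothetical values):
+    #   #   Test Name                   Unit               Fs Requirement  Current Expectation  Actual   Result
+    #   1   Registration of CM-handles  CM-handles/second  22              100                  95.000   ✅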
- # Now safely create the border line
- border_line=$(printf '#%.0s' $(seq 1 "$table_width"))
-
- format_title_spaced() {
- local input="$1"
- local result=""
- for word in $input; do
- for ((i=0; i<${#word}; i++)); do
- result+="${word:$i:1} "
- done
- result+=" "
- done
- echo "$result"
- }
-
- # Pad title string to center it in the table width
- raw_title="K6 ${testProfile^^} PERFORMANCE TEST RESULTS"
-
- # Dynamically center title within the line
- title="$(format_title_spaced "$raw_title")"
- title_line=$(printf "## %*s %*s##" \
- $(( (table_width - 6 + ${#title}) / 2 )) "$title" \
- $(( (table_width - 6 - ${#title}) / 2 )) "")
-
- # Print header
- echo "$border_line"
- echo "$title_line"
- echo "$border_line"
+ # Clean up
+ rm -f "$summaryFile"
- # Then print the table
- echo "$table_preview"
+else
+ echo "Error: Failed to generate $summaryFile" >&2
+fi
- # Print closing border after the table
- echo "$border_line"
- echo
+# Change the directory back to where it was
+popd >/dev/null || exit 1
- # 🎯 Final FS Summary of threshold result
+# 🎯 Final FS summary of threshold results; exit with the failure count if needed
+if [[ "$testProfile" == "kpi" ]]; then
if (( threshold_failures > 0 )); then
echo "❌ Summary: [$threshold_failures] test(s) failed FS requirements."
echo
echo "❗ Number of failures or threshold breaches: $threshold_failures"
echo "Please check the summary reports and logs above for details."
echo "Investigate any failing metrics and consider re-running the tests after fixes."
- echo
- ((number_of_failures++))
+ exit $threshold_failures
else
echo "✅ All tests passed FS requirements."
echo "✅ No threshold violations or execution errors detected."
echo "You can review detailed results in the generated summary."
fi
-
- # Cleanup temp files related to reporting
- rm -f "$filtered_summary" "$annotated_summary"
-
-else # no summary file
- echo "❌ Error: Summary file [$summaryFile] was not generated. Possible K6 failure."
- ((number_of_failures++))
-fi
else
# ─────────────────────────────────────────────────────────────
# Endurance Profile: Investigative Guidance
echo " - Error rates and failed checks"
echo " - Container memory growth over time (especially in endurance tests)"
echo
- echo " • 📄 Logs and Summary:"
- echo " - Check '$summaryFile' for raw execution summary."
+ echo " • 📄 Logs:"
echo " - Inspect logs for timeout/retries/exception patterns."
echo
echo "ℹ️ Reminder: For KPI validation with FS thresholds, re-run with profile: 'kpi'"
- echo
-fi # end of testProfile check
-
-# Cleanup global temp file
-rm -f "$summaryFile"
-
-# final exit
-popd >/dev/null || true
-exit $number_of_failures
\ No newline at end of file
+ exit 0
+fi
\ No newline at end of file
import { sendBatchOfKafkaMessages } from './common/produce-avc-event.js';
import { executeWriteDataJob } from "./common/write-data-job.js";
-#METRICS-TRENDS-PLACEHOLDER#
+let cmHandlesCreatedTrend = new Trend('cm_handles_created', false);
+let cmHandlesDeletedTrend = new Trend('cm_handles_deleted', false);
+let cmHandleIdSearchNoFilterTrend = new Trend('cm_handle_id_search_no_filter', true);
+let cmHandleIdSearchModuleFilterTrend = new Trend('cm_handle_id_search_module_filter', true);
+let cmHandleIdSearchPropertyFilterTrend = new Trend('cm_handle_id_search_property_filter', true);
+let cmHandleIdSearchCpsPathFilterTrend = new Trend('cm_handle_id_search_cps_path_filter', true);
+let cmHandleIdSearchTrustLevelFilterTrend = new Trend('cm_handle_id_search_trust_level_filter', true);
+let cmHandleSearchNoFilterTrend = new Trend('cm_handle_search_no_filter', true);
+let cmHandleSearchModuleFilterTrend = new Trend('cm_handle_search_module_filter', true);
+let cmHandleSearchPropertyFilterTrend = new Trend('cm_handle_search_property_filter', true);
+let cmHandleSearchCpsPathFilterTrend = new Trend('cm_handle_search_cps_path_filter', true);
+let cmHandleSearchTrustLevelFilterTrend = new Trend('cm_handle_search_trust_level_filter', true);
+let ncmpReadOverheadTrend = new Trend('ncmp_read_overhead', true);
+let ncmpWriteOverheadTrend = new Trend('ncmp_write_overhead', true);
+let legacyBatchReadTrend = new Trend('legacy_batch_read', false);
+let dcmWriteDataJobSmallTrend = new Trend('dcm_write_data_job_small', true);
+let dcmWriteDataJobLargeTrend = new Trend('dcm_write_data_job_large', true);
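+// Note: the second Trend constructor argument is k6's 'isTime' flag; 'true' formats
+// the metric as a duration (millisecond metrics), 'false' reports a plain number
+// (throughput-style metrics such as CM-handles/second).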
const EXPECTED_WRITE_RESPONSE_COUNT = 1;
const nextBatchOfCmHandleIds = makeBatchOfCmHandleIds(REGISTRATION_BATCH_SIZE, batchNumber);
const response = deleteCmHandles(nextBatchOfCmHandleIds);
if (response.error_code === 0) {
- numberOfDeregisteredCmHandles += REGISTRATION_BATCH_SIZE
+ numberOfDeregisteredCmHandles += REGISTRATION_BATCH_SIZE
}
check(response, { 'delete CM-handles status equals 200': (response) => response.status === 200 });
}
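+// k6 invokes handleSummary(data) once after the run; returning an object with a
+// 'stdout' key replaces k6's default end-of-test summary with the custom report.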
if (testProfile === 'kpi') {
console.log("✅ Generating KPI summary...");
return {
- stdout: makeCustomSummaryReport(data.metrics, options.thresholds),
+ stdout: makeCustomSummaryReport(data, options),
};
}
console.log("⛔ Skipping KPI summary (not in 'kpi' profile)");
return {};
-}
+}
\ No newline at end of file