- Added a Bash shebang (`#!/bin/bash`) so the script always runs under the intended shell.
- Replaced Bash-specific arithmetic syntax `((...))` with POSIX-compliant `$((...))` so the script also works when invoked via `sh`.
- Fixed a syntax error in a `case` statement caused by a token `sh` does not support.
Issue-ID: CPS-2844
Change-Id: Iaf8e225388f0e11223bed722fde72be5bf25bb0b
Signed-off-by: sourabh_sourabh <sourabh.sourabh@est.tech>
}
/**
- * Validates a response against an expected HTTP status and an optional additional check.
- * If successful, records a metric value to a trend. Logs detailed error output on failure.
+ * Validates an HTTP response and records its duration minus a known overhead (e.g., an artificial delay).
*
- * @param {Object} response - The HTTP response object from a K6 request.
- * @param {number} expectedStatus - The expected HTTP status code (e.g., 200, 202).
- * @param {string} checkLabel - The label to use in the K6 `check()` for reporting.
- * @param {Trend} trendMetric - A K6 `Trend` metric to which the extracted value will be added on success.
- * @param {function(Object): number} metricExtractor - A function that takes the response and returns a numeric value to record (e.g., `res.timings.duration`).
- * @param {function(Object): boolean} [additionalCheckValidator=() => true] - Optional function for any additional custom validation on the response.
+ * @param {Object} httpResponse - The HTTP response object returned by a K6 request.
+ * @param {number} expectedStatusCode - The expected HTTP status code (e.g., 200, 201).
+ * @param {string} checkLabel - The label reported by the K6 `check()`.
+ * @param {number} delayInMs - Known overhead in milliseconds to subtract from the measured duration.
+ * @param {Trend} trendMetric - The K6 `Trend` metric that records the adjusted duration on success.
+ */
+export function validateResponseAndRecordMetricWithOverhead(httpResponse, expectedStatusCode, checkLabel, delayInMs, trendMetric) {
+ recordMetricIfResponseValid(httpResponse, expectedStatusCode, checkLabel, trendMetric,
+        (response) => response.timings.duration - delayInMs
+ );
+}
+
+/**
+ * Validates that the response body is a JSON array of the expected length and records the request duration.
*
+ * @param {Object} httpResponse - The HTTP response object returned by a K6 request.
+ * @param {number} expectedStatusCode - The expected HTTP status code (e.g., 200).
+ * @param {string} checkLabel - The label reported by the K6 `check()`.
+ * @param {number} expectedArrayLength - The expected length of the top-level JSON array in the response body.
+ * @param {Trend} trendMetric - The K6 `Trend` metric that records the request duration on success.
*/
-export function validateAndRecordMetric(response, expectedStatus, checkLabel, trendMetric, metricExtractor, additionalCheckValidator = () => true) {
+export function validateResponseAndRecordMetric(httpResponse, expectedStatusCode, checkLabel, expectedArrayLength, trendMetric) {
+    recordMetricIfResponseValid(httpResponse, expectedStatusCode, checkLabel, trendMetric,
+        (response) => response.timings.duration,
+        (response) => {
+ const status = response.status;
+ const body = typeof response.body === 'string' ? response.body.trim() : '';
+ if (!body) {
+ return {valid: false, reason: `Status ${status} - Empty response body`};
+ }
+ try {
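+            // k6's Response.json() accepts a GJSON path; '#' returns the length of the top-level array.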
+ const arrayLength = response.json('#');
+ const valid = arrayLength === expectedArrayLength;
+ return {
+ valid,
+ reason: valid ? undefined : `Status ${status} - Expected array length ${expectedArrayLength}, but got ${arrayLength}`
+ };
+ } catch (e) {
+ return {valid: false, reason: `Status ${status} - JSON parse error: ${e.message}`};
+ }
+ });
+}
- const isExpectedStatus = response.status === expectedStatus;
- const isAdditionalCheckValid = additionalCheckValidator(response);
- const isSuccess = check(response, {
- [checkLabel]: () => isExpectedStatus && isAdditionalCheckValid,
+function recordMetricIfResponseValid(httpResponse, expectedStatusCode, checkLabel, metricRecorder, metricValueExtractor,
+                                     customValidatorFn = () => ({valid: true, reason: undefined})) {
+    const hasExpectedStatus = httpResponse.status === expectedStatusCode;
+    const {valid: isCustomValidationPassed, reason = ""} = customValidatorFn(httpResponse);
+
+ const isSuccess = check(httpResponse, {
+        [checkLabel]: () => hasExpectedStatus && isCustomValidationPassed,
});
if (isSuccess) {
- trendMetric.add(metricExtractor(response));
+ metricRecorder.add(metricValueExtractor(httpResponse));
} else {
- console.error(`${checkLabel} failed. Status: ${response.status}`);
- if (response.body) {
- try {
- const responseBody = JSON.parse(response.body);
- console.error(`❌ ${checkLabel} failed: Error response status: ${response.status}, message: ${responseBody.message}, details: ${responseBody.details}`);
- } catch (e) {
- console.error(`❌ ${checkLabel} failed: Unable to parse response body.`);
- }
- }
+        logDetailedFailure(httpResponse, hasExpectedStatus, checkLabel, reason);
}
}
-export function processHttpResponseWithOverheadMetrics(response, expectedStatus, checkLabel, delayMs, trendMetric) {
- validateAndRecordMetric(response, expectedStatus, checkLabel, trendMetric, (res) => res.timings.duration - delayMs);
-}
+function logDetailedFailure(httpResponse, hasExpectedStatus, checkLabel, customReason = "") {
+ const {status, url, body} = httpResponse;
+ const trimmedBody = typeof body === 'string' ? body.trim() : '';
-export function validateResponseAndRecordMetric(response, expectedStatus, expectedJsonLength, trendMetric, checkLabel) {
- validateAndRecordMetric(response, expectedStatus, checkLabel, trendMetric, (res) => res.timings.duration, (res) => res.json('#') === expectedJsonLength);
-}
+ // If status is okay but custom check failed, log that reason only
+    if (hasExpectedStatus && customReason) {
+ console.error(`❌ ${checkLabel}: Custom validation failed. Reason: ${customReason}. URL: ${url}`);
+ return;
+ }
+ // Categorize status
+ let errorCategory;
+ if (status >= 100 && status < 200) {
+ errorCategory = `Informational Response (${status})`;
+ } else if (status >= 300 && status < 400) {
+ errorCategory = `Redirection (${status})`;
+    } else if (status >= 400 && status < 500) {
+        errorCategory = `Client Error (${status})`;
+    } else if (status >= 500 && status < 600) {
+        errorCategory = `Server Error (${status})`;
+ } else if (status === 0) {
+ errorCategory = 'Network Error or Timeout (status = 0 - likely no HTTP response received)';
+ } else {
+ errorCategory = `Unexpected Status (${status})`;
+ }
+
+ if (!trimmedBody) {
+ console.error(`❌ ${checkLabel}: ${errorCategory}. Empty response body. URL: ${url}`);
+ return;
+ }
+
+ try {
+ const responseJson = JSON.parse(trimmedBody);
+ const errorMessage = responseJson && responseJson.message ? responseJson.message : 'No message';
+ const errorDetails = responseJson && responseJson.details ? responseJson.details : 'No details';
+ console.error(`❌ ${checkLabel}: ${errorCategory}. Status: ${status}, Message: ${errorMessage}, Details: ${errorDetails}, URL: ${url}`);
+ } catch (e) {
+ const bodyPreview = trimmedBody.length > 500 ? trimmedBody.slice(0, 500) + '... [truncated]' : trimmedBody;
+ console.error(`❌ ${checkLabel}: ${errorCategory}. Status: ${status}, URL: ${url}. Response is not valid JSON.\n↪️ Raw body preview:\n${bodyPreview}\n✳️ Parse error: ${e.message}`);
+ }
+}
\ No newline at end of file
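Note: a custom validator passed to `recordMetricIfResponseValid` must return a `{valid, reason}` object. A minimal hypothetical sketch (illustrative only, not part of this change):

    // Hypothetical example: accept only a non-empty top-level JSON array,
    // returning the {valid, reason} shape recordMetricIfResponseValid expects.
    const nonEmptyArrayValidator = (response) => {
        try {
            const parsedBody = JSON.parse(response.body);
            const valid = Array.isArray(parsedBody) && parsedBody.length > 0;
            return {valid, reason: valid ? undefined : `Status ${response.status} - Expected a non-empty array`};
        } catch (e) {
            return {valid: false, reason: `Status ${response.status} - JSON parse error: ${e.message}`};
        }
    };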
import { check, sleep } from 'k6';
import { Trend } from 'k6/metrics';
import { Reader } from 'k6/x/kafka';
-import {
+import { testConfig, validateResponseAndRecordMetricWithOverhead,
+ validateResponseAndRecordMetric, makeCustomSummaryReport, makeBatchOfCmHandleIds, makeRandomBatchOfAlternateIds,
TOTAL_CM_HANDLES, READ_DATA_FOR_CM_HANDLE_DELAY_MS, WRITE_DATA_FOR_CM_HANDLE_DELAY_MS,
- makeCustomSummaryReport, makeBatchOfCmHandleIds, makeRandomBatchOfAlternateIds,
LEGACY_BATCH_THROUGHPUT_TEST_BATCH_SIZE, REGISTRATION_BATCH_SIZE,
- KAFKA_BOOTSTRAP_SERVERS, LEGACY_BATCH_TOPIC_NAME, CONTAINER_UP_TIME_IN_SECONDS, testConfig, processHttpResponseWithOverheadMetrics,
- validateResponseAndRecordMetric
+ KAFKA_BOOTSTRAP_SERVERS, LEGACY_BATCH_TOPIC_NAME, CONTAINER_UP_TIME_IN_SECONDS
} from './common/utils.js';
import { createCmHandles, deleteCmHandles, waitForAllCmHandlesToBeReady } from './common/cmhandle-crud.js';
import { executeCmHandleSearch, executeCmHandleIdSearch } from './common/search-base.js';
export function passthroughReadAltIdScenario() {
const response = passthroughRead();
- processHttpResponseWithOverheadMetrics(response, 200, 'passthrough read with alternate Id status equals 200', READ_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpReadOverheadTrend);
+ validateResponseAndRecordMetricWithOverhead(response, 200, 'passthrough read with alternate Id status equals 200', READ_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpReadOverheadTrend);
}
export function passthroughWriteAltIdScenario() {
const response = passthroughWrite();
- processHttpResponseWithOverheadMetrics(response, 201, 'passthrough write with alternate Id status equals 201', WRITE_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpWriteOverheadTrend);
+ validateResponseAndRecordMetricWithOverhead(response, 201, 'passthrough write with alternate Id status equals 201', WRITE_DATA_FOR_CM_HANDLE_DELAY_MS, ncmpWriteOverheadTrend);
}
export function cmHandleIdSearchNoFilterScenario() {
const response = executeCmHandleIdSearch('no-filter');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleIdSearchNoFilterTrend, 'CM handle ID no-filter search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle ID no-filter search', TOTAL_CM_HANDLES, cmHandleIdSearchNoFilterTrend);
}
export function cmHandleSearchNoFilterScenario() {
const response = executeCmHandleSearch('no-filter');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleSearchNoFilterTrend, 'CM handle no-filter search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle no-filter search', TOTAL_CM_HANDLES, cmHandleSearchNoFilterTrend);
}
export function cmHandleIdSearchModuleScenario() {
const response = executeCmHandleIdSearch('module');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleIdSearchModuleFilterTrend, 'CM handle ID module search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle ID module search', TOTAL_CM_HANDLES, cmHandleIdSearchModuleFilterTrend);
}
export function cmHandleSearchModuleScenario() {
const response = executeCmHandleSearch('module');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleSearchModuleFilterTrend, 'CM handle module search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle module search', TOTAL_CM_HANDLES, cmHandleSearchModuleFilterTrend);
}
export function cmHandleIdSearchPropertyScenario() {
const response = executeCmHandleIdSearch('property');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleIdSearchPropertyFilterTrend, 'CM handle ID property search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle ID property search', TOTAL_CM_HANDLES, cmHandleIdSearchPropertyFilterTrend);
}
export function cmHandleSearchPropertyScenario() {
const response = executeCmHandleSearch('property');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleSearchPropertyFilterTrend, 'CM handle property search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle property search', TOTAL_CM_HANDLES, cmHandleSearchPropertyFilterTrend);
}
export function cmHandleIdSearchCpsPathScenario() {
const response = executeCmHandleIdSearch('cps-path-for-ready-cm-handles');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleIdSearchCpsPathFilterTrend, 'CM handle ID cps path search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle ID cps path search', TOTAL_CM_HANDLES, cmHandleIdSearchCpsPathFilterTrend);
}
export function cmHandleSearchCpsPathScenario() {
const response = executeCmHandleSearch('cps-path-for-ready-cm-handles');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleSearchCpsPathFilterTrend, 'CM handle cps path search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle cps path search', TOTAL_CM_HANDLES, cmHandleSearchCpsPathFilterTrend);
}
export function cmHandleIdSearchTrustLevelScenario() {
const response = executeCmHandleIdSearch('trust-level');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleIdSearchTrustLevelFilterTrend, 'CM handle ID trust level search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle ID trust level search', TOTAL_CM_HANDLES, cmHandleIdSearchTrustLevelFilterTrend);
}
export function cmHandleSearchTrustLevelScenario() {
const response = executeCmHandleSearch('trust-level');
- validateResponseAndRecordMetric(response, 200, TOTAL_CM_HANDLES, cmHandleSearchTrustLevelFilterTrend, 'CM handle trust level search');
+ validateResponseAndRecordMetric(response, 200, 'CM handle trust level search', TOTAL_CM_HANDLES, cmHandleSearchTrustLevelFilterTrend);
}
export function legacyBatchProduceScenario() {
const nextBatchOfAlternateIds = makeRandomBatchOfAlternateIds();
const response = legacyBatchRead(nextBatchOfAlternateIds);
- check(response, { 'data operation batch read status equals 200': (response) => response.status === 200 });
+ check(response, {'data operation batch read status equals 200': (response) => response.status === 200});
}
export function writeDataJobLargeScenario() {
const response = executeWriteDataJob(100000);
- validateResponseAndRecordMetric(response, 200, EXPECTED_WRITE_RESPONSE_COUNT, dcmWriteDataJobLargeTrend, 'Large writeDataJob');
+ validateResponseAndRecordMetric(response, 200, 'Large writeDataJob', EXPECTED_WRITE_RESPONSE_COUNT, dcmWriteDataJobLargeTrend);
}
export function writeDataJobSmallScenario() {
const response = executeWriteDataJob(100);
- validateResponseAndRecordMetric(response, 200, EXPECTED_WRITE_RESPONSE_COUNT, dcmWriteDataJobSmallTrend, 'Small writeDataJob');
+ validateResponseAndRecordMetric(response, 200, 'Small writeDataJob', EXPECTED_WRITE_RESPONSE_COUNT, dcmWriteDataJobSmallTrend);
}
export function produceAvcEventsScenario() {
}
export function handleSummary(data) {
- return {
- stdout: makeCustomSummaryReport(data, options),
- };
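+    // __ENV exposes variables passed on the k6 command line, e.g. 'k6 run -e TEST_PROFILE=kpi'.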
+ const testProfile = __ENV.TEST_PROFILE;
+ if (testProfile === 'kpi') {
+ console.log("✅ Generating KPI summary...");
+ return {
+ stdout: makeCustomSummaryReport(data, options),
+ };
+ }
+ console.log("⛔ Skipping KPI summary (not in 'kpi' profile)");
+ return {};
}
# ─────────────────────────────────────────────────────────────
# 📁 Navigate to Script Directory
# ─────────────────────────────────────────────────────────────
-pushd "$(dirname "$0")" >/dev/null || { echo "❌ Failed to access script directory. Exiting."; exit 1; }
+pushd "$(dirname "$0")" >/dev/null || {
+ echo "❌ Failed to access script directory. Exiting."
+ exit 1
+}
# ─────────────────────────────────────────────────────────────
# 📌 Global Variables
echo "📢 Running NCMP K6 performance test for profile: [$testProfile]"
echo
-# ─────────────────────────────────────────────────────────────
-# 1️⃣ Generate trend declarations and thresholds from metadata
-# ─────────────────────────────────────────────────────────────
-echo "🔧 Generating trend declarations, thresholds from [$KPI_METADATA_FILE] and updating [$NCMP_RUNNER_FILE] and [$KPI_CONFIG_FILE]..."
+# ───────────────────────────────────────────────────────────────────────────
+# 1️⃣ Generate trend declarations and (conditionally) thresholds from metadata
+# ───────────────────────────────────────────────────────────────────────────
+echo "🔧 Generating trend declarations from [$KPI_METADATA_FILE]..."
read -r -d '' jq_script << 'EOF'
def toCamelCase:
# Execute jq script
jq_output=$(jq -r "$jq_script" "$KPI_METADATA_FILE")
-# Extract trends and thresholds
+# Extract trends
trend_declarations=$(echo "$jq_output" | jq -r '.trends[]')
-thresholds_json=$(echo "$jq_output" | jq '.thresholds')
# Replace placeholder in runner with generated trends
TMP_FILE=$(mktemp)
mv "$TMP_FILE" "$NCMP_RUNNER_FILE"
echo "✅ Trend declarations inserted into [$NCMP_RUNNER_FILE]"
-# Update thresholds in KPI config
-TMP_FILE=$(mktemp)
-cp "$KPI_CONFIG_FILE" "$TMP_FILE"
-jq --argjson thresholds "$thresholds_json" '
- .thresholds = $thresholds
-' "$TMP_FILE" | jq '.' > "$KPI_CONFIG_FILE"
-rm -f "$TMP_FILE"
-echo "✅ Threshold block has been injected into [$KPI_CONFIG_FILE]"
-echo
+# If profile is KPI, generate threshold config too
+if [[ "$testProfile" == "kpi" ]]; then
+ echo "📌 Writing thresholds to [$KPI_CONFIG_FILE]..."
+ # Update thresholds in KPI config
+ # Extract thresholds
+ thresholds_json=$(echo "$jq_output" | jq '.thresholds')
+ TMP_FILE=$(mktemp)
+ cp "$KPI_CONFIG_FILE" "$TMP_FILE"
+ jq --argjson thresholds "$thresholds_json" '.thresholds = $thresholds' "$TMP_FILE" | jq '.' > "$KPI_CONFIG_FILE"
+ rm -f "$TMP_FILE"
+ echo "✅ Threshold block has been injected into [$KPI_CONFIG_FILE]"
+ echo
+fi
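+# For illustration only: the injected block follows k6's standard thresholds schema,
+# e.g. "thresholds": { "http_req_duration": ["p(95)<500"], "checks": ["rate>0.99"] } (hypothetical values)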
# ─────────────────────────────────────────────────────────────
# 2️⃣ Run K6 and Capture Output
case $k6_exit_code in
0) echo "✅ K6 executed successfully for profile: [$testProfile]." ;;
99) echo "⚠️ K6 thresholds failed (exit code 99). Processing failures..." ;;
- *) echo "❌ K6 execution error (exit code $k6_exit_code)."; ((number_of_failures++)) ;;
+ *) echo "❌ K6 execution error (exit code $k6_exit_code)."; number_of_failures=$((number_of_failures + 1)) ;;
esac
+if [[ "$testProfile" == "kpi" ]]; then
# ─────────────────────────────────────────────────────────────
# 3️⃣ Extract and Filter Summary Data
# ─────────────────────────────────────────────────────────────
# 🎯 Final FS Summary of threshold result
if (( threshold_failures > 0 )); then
echo "❌ Summary: [$threshold_failures] test(s) failed FS requirements."
+ echo
+ echo "⚠️ Performance tests completed with issues for profile: [$testProfile]."
+ echo "❗ Number of failures or threshold breaches: $threshold_failures"
+ echo "Please check the summary reports and logs above for details."
+ echo "Investigate any failing metrics and consider re-running the tests after fixes."
+ echo
((number_of_failures++))
else
echo "✅ All tests passed FS requirements."
+ echo "✅ No threshold violations or execution errors detected."
+ echo "You can review detailed results in the generated summary."
fi
- # Cleanup temp files
- rm -f "$summaryFile" "$filtered_summary" "$annotated_summary"
+ # Cleanup temp files related to reporting
+ rm -f "$filtered_summary" "$annotated_summary"
else # no summary file
echo "❌ Error: Summary file [$summaryFile] was not generated. Possible K6 failure."
((number_of_failures++))
fi
+else
+# ─────────────────────────────────────────────────────────────
+# Endurance Profile: Investigative Guidance
+# ─────────────────────────────────────────────────────────────
+ echo
+ echo "🔍 Skipping KPI evaluation for profile [$testProfile]"
+ echo
+ echo "📌 Please use the following tools and dashboards to investigate performance:"
+ echo
+ echo " • 📈 Grafana Dashboards:"
+ echo " - Nordix Prometheus/Grafana can visualize memory and latency trends."
+ echo " - Especially useful for endurance/stability runs."
+ echo " - 🌐 https://monitoring.nordix.org/login"
+ echo " - Dashboards include:"
+ echo " ▪ Check CM Handle operation latency trends over time."
+ echo " ▪ Focus on 'Pass-through Read/Write', 'Search', or 'Kafka Batch' graphs."
+ echo " ▪ Memory usage patterns (cps/ncmp containers)"
+ echo " ▪ Kafka lag and consumer trends (if applicable)"
+ echo
+ echo " • 📊 GnuPlot:"
+ echo " - Optional local alternative to visualize memory trends."
+ echo " - Requires exporting memory data (CSV/JSON) and plotting manually."
+ echo
+ echo " • 🔎 Important Metrics to Watch:"
+ echo " - HTTP duration (avg, p95, max)"
+ echo " - VU concurrency and iteration rates"
+ echo " - Error rates and failed checks"
+ echo " - Container memory growth over time (especially in endurance tests)"
+ echo
+ echo " • 📄 Logs and Summary:"
+ echo " - Check '$summaryFile' for raw execution summary."
+ echo " - Inspect logs for timeout/retries/exception patterns."
+ echo
+ echo "ℹ️ Reminder: For KPI validation with FS thresholds, re-run with profile: 'kpi'"
+ echo
+fi # end of testProfile check
+
+# Cleanup global temp file
+rm -f "$summaryFile"
# ─────────────────────────────────────────────────────────────
# 🔚 Final Exit
# limitations under the License.
#
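+# Exit immediately if any command fails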
+set -e
+
# Load image check and logging functions
source "$(dirname "$0")/verify-docker-image-digests.sh"
+# Accept test profile (e.g., 'kpi', 'endurance') as first argument
testProfile=$1
+
+# Set ENV and COMPOSE file paths
ENV_FILE="../docker-compose/env/${testProfile}.env"
COMPOSE_FILE="../docker-compose/cps-base.yml"
+# Load environment variables from the selected .env file
+set -o allexport
+source "$ENV_FILE"
+set +o allexport
+
# Define images to pre-check (add more if needed)
IMAGES_TO_CHECK=(
- "nexus3.onap.org:10003/onap/dmi-stub:latest"
+ "nexus3.onap.org:10003/onap/dmi-stub:${DMI_DEMO_STUB_VERSION}"
)
+echo -e "\n📦 Docker images to check:"
+for img in "${IMAGES_TO_CHECK[@]}"; do
+ echo " - $img"
+done
+
# Run the image checks before anything else
check_images "${IMAGES_TO_CHECK[@]}"
# Start the containers
echo
-echo "Spinning off the following containers for '$testProfile'..."
-echo
-compose up --quiet-pull --detach --wait || { echo "Failed to start containers."; exit 1; }
-
-# Define port mappings based on the test profile
-declare -A CPS_PORTS=( ["kpi"]=8883 ["endurance"]=8884 )
-declare -A DMI_DEMO_STUB_PORTS=( ["kpi"]=8784 ["endurance"]=8787 )
+echo -e "\n🚀 Spinning off containers for profile: '$testProfile'...\n"
+compose up --quiet-pull --detach --wait || {
+ echo "❌ Failed to start containers."
+ exit 1
+}
-CPS_ACTUATOR_PORT="${CPS_PORTS[$testProfile]}"
-DMI_DEMO_STUB_ACTUATOR_PORT="${DMI_DEMO_STUB_PORTS[$testProfile]}"
+# Define port mappings from env file based on the test profile
+CPS_ACTUATOR_PORT=$CPS_CORE_PORT
+DMI_DEMO_STUB_ACTUATOR_PORT=$DMI_DEMO_STUB_PORT
-# Function to fetch and display build information
+# Fetch build info from actuators
fetch_build_info() {
local service_name="$1"
local port="$2"
local url="http://localhost:${port}/actuator/info"
- echo -e "\n${service_name} Build Information:"
+ echo -e "\n🔍 ${service_name} Build Information:"
if curl --silent --show-error "$url"; then
echo
else
- echo "Error: Unable to retrieve ${service_name} build information from ${url}"
+ echo "⚠️ Error: Unable to retrieve ${service_name} build information from ${url}"
exit 1
fi
}
# Fetch and display build information for CPS and DMI
fetch_build_info "CPS and NCMP" "$CPS_ACTUATOR_PORT"
fetch_build_info "DMI" "$DMI_DEMO_STUB_ACTUATOR_PORT"
-echo
\ No newline at end of file
+
+echo -e "\n✅ Setup complete for test profile: $testProfile"