From 9683aefbd9e466c762b630ca43e594394fb0a550 Mon Sep 17 00:00:00 2001 From: "halil.cakal" Date: Tue, 30 Sep 2025 13:45:33 +0100 Subject: [PATCH] Add scripts that manage k8s orchestration (Split-4) - updated bash scripts for installing charts, running k6 tests, uninstalling deployment, and storing pod logs - added environment variable: deploymentType to differentiate deployment cluster and hosts - added hosts that are related to k8s cluster deployment - updated readme.md - set timeout to 300s for the pod availability - increase resources (cpu and memory) for kafka and zookeeper services to let them start successfully on virtual machine - increase resources (cpu and memory) for dbpostgresql service to support at least 30K cm handles for the virtual test server Issue-ID: CPS-2967 Change-Id: I464c551749725788019e1fec23a443085b2cb54d Signed-off-by: halil.cakal --- cps-charts/templates/postgresql-deployment.yaml | 3 - cps-charts/templates/zookeeper-deployment.yaml | 3 - cps-charts/values.yaml | 18 ++-- k6-tests/README.md | 39 +++++++- k6-tests/k6-main.sh | 38 +++++-- k6-tests/make-logs.sh | 128 ++++++++++++++++++++---- k6-tests/ncmp/common/produce-avc-event.js | 3 +- k6-tests/ncmp/common/utils.js | 9 +- k6-tests/ncmp/config/endurance.json | 7 +- k6-tests/ncmp/config/kpi.json | 7 +- k6-tests/ncmp/execute-k6-scenarios.sh | 3 +- k6-tests/ncmp/ncmp-test-runner.js | 1 - k6-tests/teardown.sh | 81 +++++++++++---- 13 files changed, 263 insertions(+), 77 deletions(-) diff --git a/cps-charts/templates/postgresql-deployment.yaml b/cps-charts/templates/postgresql-deployment.yaml index 3f888f73d8..9b29b16da9 100644 --- a/cps-charts/templates/postgresql-deployment.yaml +++ b/cps-charts/templates/postgresql-deployment.yaml @@ -33,9 +33,6 @@ spec: - name: init-sql mountPath: {{ .Values.postgresql.initSql.mountPath }} resources: - requests: - cpu: {{ .Values.postgresql.resources.requests.cpu }} - memory: {{ .Values.postgresql.resources.requests.memory }} limits: cpu: {{ 
.Values.postgresql.resources.limits.cpu }} memory: {{ .Values.postgresql.resources.limits.memory }} diff --git a/cps-charts/templates/zookeeper-deployment.yaml b/cps-charts/templates/zookeeper-deployment.yaml index b48128dd82..45c39dc30a 100644 --- a/cps-charts/templates/zookeeper-deployment.yaml +++ b/cps-charts/templates/zookeeper-deployment.yaml @@ -44,6 +44,3 @@ spec: limits: cpu: {{ .Values.zookeeper.resources.limits.cpu }} memory: {{ .Values.zookeeper.resources.limits.memory }} - requests: - cpu: {{ .Values.zookeeper.resources.requests.cpu }} - memory: {{ .Values.zookeeper.resources.requests.memory }} diff --git a/cps-charts/values.yaml b/cps-charts/values.yaml index 4abe474974..53597a7675 100644 --- a/cps-charts/values.yaml +++ b/cps-charts/values.yaml @@ -1,12 +1,9 @@ postgresql: image: "postgres:14.1-alpine" resources: - requests: - cpu: "1" - memory: "1Gi" limits: - cpu: "1" - memory: "1Gi" + cpu: "6" + memory: "3Gi" servicePort: 5432 env: POSTGRES_DB: "cpsdb" @@ -77,8 +74,8 @@ kafka: offsetsTopicReplicationFactor: 1 resources: limits: - cpu: "500m" - memory: "1Gi" + cpu: "1" + memory: "3Gi" healthcheck: enabled: true command: > @@ -99,11 +96,8 @@ zookeeper: port: 2181 resources: limits: - cpu: "500m" - memory: "1Gi" - requests: - cpu: "250m" - memory: "512Mi" + cpu: "1" + memory: "3Gi" env: ZOOKEEPER_CLIENT_PORT: 2181 healthcheck: diff --git a/k6-tests/README.md b/k6-tests/README.md index 44ad00240b..2c9e1e733f 100644 --- a/k6-tests/README.md +++ b/k6-tests/README.md @@ -6,13 +6,42 @@ k6 tests are written in JavaScript. ## k6 installation Follow the instructions in the [build from source guide](https://github.com/mostafa/xk6-kafka) to get started. -## Running the k6 test suites -These tests measure the system capabilities as per requirements. -There are two test profiles that can be run with either: kpi or endurance. +## Running k6 test suites +The CPS k6 tests measure the system capabilities as per requirements. 
+ +### Test Profiles +There are two test profiles that can be run with either: +1. kpi — The test profile is to evaluate overall performance. +2. endurance — The test profile to measure long-term stability. + +### Deployment Types +1. dockerHosts — A docker-compose based deployment for the services in CPS/NCMP. +2. k8sHosts — A Kubernetes based deployment for the services in CPS/NCMP with Helm Charts. + +### Running the k6 test suites on a docker-compose environment +Only docker-compose deployment type supported: dockerHosts +Run the main script. +The script assumes k6 and the relevant docker-compose have been installed. +```shell +./k6-main.sh [kpi|endurance] [dockerHosts] +``` + +### Running the k6 test suites on a Kubernetes environment +Only kubernetes cluster deployment type supported: k8sHosts + +#### Prerequisites for Windows +1. Docker Desktop +2. Enable Kubernetes in Docker Desktop (Settings, Kubernetes). Known issue: it may hang on "starting kubernetes" for a few minutes. Resolution: click "Reset Cluster" then it starts. +3. Install Helm, see [installing helm on windows](https://helm.sh/docs/intro/install/). Recommended approach: install Helm with winget. + +#### Prerequisites for Linux +1. k3s from Rancher [installing k3s on linux](https://ranchermanager.docs.rancher.com/how-to-guides/new-user-guides/kubernetes-cluster-setup/k3s-for-rancher) +2. Install Helm, see [installing helm on linux](https://helm.sh/docs/intro/install/) + Run the main script. -(The script assumes k6 and the relevant docker-compose have been installed). +By default, kpi profile is supported, and it assumes the kubernetes environment with Helm is already available. 
```shell -./k6-main.sh kpi +./k6-main.sh [kpi|endurance] [k8sHosts] ``` ## Running k6 tests manually diff --git a/k6-tests/k6-main.sh b/k6-tests/k6-main.sh index bc023d6051..ce76fe95bc 100755 --- a/k6-tests/k6-main.sh +++ b/k6-tests/k6-main.sh @@ -23,11 +23,15 @@ set -o pipefail # Use last non-zero exit code in a pipeline # Default test profile is kpi. testProfile=${1:-kpi} +# The default deployment type is dockerCompose +deploymentType=${2:-dockerHosts} + # Cleanup handler: capture exit status, run teardown, # and restore directory, report failures, and exit with original code. on_exit() { rc=$? - ./teardown.sh "$testProfile" + chmod +x teardown.sh + ./teardown.sh "$testProfile" "$deploymentType" popd echo "TEST FAILURES: $rc" exit $rc @@ -38,14 +42,34 @@ trap on_exit EXIT SIGINT SIGTERM SIGQUIT pushd "$(dirname "$0")" || exit 1 -# Install needed dependencies. -source install-deps.sh +# Handle deployment type specific setup +if [[ "$deploymentType" == "dockerHosts" ]]; then + echo "Test profile: $testProfile, and deployment type: $deploymentType provided for docker-compose cluster" + + # Install needed dependencies for docker deployment + source install-deps.sh + + # Run setup for docker-compose environment + ./setup.sh "$testProfile" + +elif [[ "$deploymentType" == "k8sHosts" ]]; then + echo "Test profile: $testProfile, and deployment type: $deploymentType provided for k8s cluster" + + # Deploy cps charts for k8s + helm install cps ../cps-charts + + # Wait for pods and services until becomes ready + echo "Waiting for cps and ncmp pods to be ready..." + kubectl wait --for=condition=available deploy -l app=cps-and-ncmp --timeout=300s -echo "Test profile provided: $testProfile" +else + echo "Error: Unsupported deployment type: $deploymentType" + echo "Supported deployment types: dockerHosts, k8sHosts" + exit 1 +fi -# Run k6 test suite. 
-./setup.sh "$testProfile" -./ncmp/execute-k6-scenarios.sh "$testProfile" +# Run k6 test suite for both deployment types +./ncmp/execute-k6-scenarios.sh "$testProfile" "$deploymentType" NCMP_RESULT=$? # Note that the final steps are done in on_exit function after this exit! diff --git a/k6-tests/make-logs.sh b/k6-tests/make-logs.sh index 56907fd34d..0707650478 100755 --- a/k6-tests/make-logs.sh +++ b/k6-tests/make-logs.sh @@ -21,12 +21,21 @@ set -euo pipefail readonly LOG_DIR="${WORKSPACE:-.}/logs" readonly LOG_RETENTION_DAYS=14 readonly TIMESTAMP=$(date +"%Y%m%d%H%M%S") -readonly SERVICES_TO_BE_LOGGED=("cps-and-ncmp" "ncmp-dmi-plugin-demo-and-csit-stub" "dbpostgresql") + +# Docker services +readonly DOCKER_SERVICES_TO_BE_LOGGED=("cps-and-ncmp" "ncmp-dmi-plugin-demo-and-csit-stub" "dbpostgresql") + +# Kubernetes services +readonly K8S_SERVICES_TO_BE_LOGGED=("cps-cps-and-ncmp-cps" "cps-cps-and-ncmp-dmi-stub" "cps-cps-and-ncmp-postgresql") +readonly K8S_APP_LABEL="app=cps-and-ncmp" + +# The default deployment type is dockerHosts +deploymentType=${1:-dockerHosts} # Ensure log directory exists mkdir -p "$LOG_DIR" -# Function to fetch logs from a container +# Function to fetch logs from a Docker container fetch_container_logs() { local container_id="$1" local container_name @@ -35,34 +44,113 @@ fetch_container_logs() { docker logs "$container_id" > "$log_file" } -# Function to archive logs for a service -archive_service_logs() { +# Function to fetch logs from Kubernetes pods +fetch_pod_logs() { + local service_name="$1" + local temp_dir="$2" + + # Get pod names for the current service, filtering by the app label and then grepping by service name. 
+ local pod_names + pod_names=$(kubectl get pods -l "$K8S_APP_LABEL" --no-headers -o custom-columns=":metadata.name" | grep "^${service_name}" || echo "") + + if [ -z "$pod_names" ]; then + echo "No running pods found for service: $service_name" + return 1 + fi + + for pod_name in $pod_names; do + echo " Fetching logs for pod: $pod_name" + local log_file="$temp_dir/${pod_name}_logs.log" + kubectl logs "$pod_name" > "$log_file" + done + + return 0 +} + +# Generic function to create zip archive from collected logs +create_log_archive() { local service_name="$1" local temp_dir="$2" local zip_file="$3" + # Only create a zip file if logs were collected + if [ -n "$(ls -A "$temp_dir")" ]; then + echo " Zipping logs to $zip_file" + # The -j option flattens the directory structure. Logs will be at the root of the zip. + zip -r -j "$zip_file" "$temp_dir" + echo " Logs for service '$service_name' saved to $zip_file" + else + echo " No logs were fetched for service '$service_name'" + fi +} + +# Generic function to archive logs for a service (works for both Docker and Kubernetes) +archive_service_logs() { + local service_name="$1" + local deployment_type="$2" + local temp_dir="$LOG_DIR/temp_${deployment_type}_${service_name}_$TIMESTAMP" + local zip_file="$LOG_DIR/logs_${deployment_type}_${service_name}_$TIMESTAMP.zip" + local logs_fetched=false + + echo "Processing service: $service_name" mkdir -p "$temp_dir" - local container_ids - container_ids=$(docker ps --filter "name=$service_name" --format "{{.ID}}") + case "$deployment_type" in + "docker") + local container_ids + container_ids=$(docker ps --filter "name=$service_name" --format "{{.ID}}") - for container_id in $container_ids; do - fetch_container_logs "$container_id" "$temp_dir" - done + if [ -z "$container_ids" ]; then + echo "No running containers found for service: $service_name" + else + for container_id in $container_ids; do + fetch_container_logs "$container_id" "$temp_dir" + done + logs_fetched=true + fi + ;; + 
"k8s") + if fetch_pod_logs "$service_name" "$temp_dir"; then + logs_fetched=true + fi + ;; + esac - zip -r "$zip_file" "$temp_dir" - echo "Logs for service '$service_name' saved to $zip_file" + if [ "$logs_fetched" = true ]; then + create_log_archive "$service_name" "$temp_dir" "$zip_file" + fi + # Clean up the temporary directory rm -r "$temp_dir" } -# Main process -for service_name in "${SERVICES_TO_BE_LOGGED[@]}"; do - temp_dir="$LOG_DIR/temp_${service_name}_$TIMESTAMP" - zip_file="$LOG_DIR/logs_${service_name}_$TIMESTAMP.zip" - - archive_service_logs "$service_name" "$temp_dir" "$zip_file" -done +# Function to clean up old logs +cleanup_old_logs() { + local pattern="$1" + echo "Cleaning up logs older than $LOG_RETENTION_DAYS days..." + find "$LOG_DIR" -name "$pattern" -mtime +$LOG_RETENTION_DAYS -delete +} -# Clean up old logs -find "$LOG_DIR" -name "logs_*.zip" -mtime +$LOG_RETENTION_DAYS -delete +# Main process - handle different deployment types +case "$deploymentType" in + "dockerHosts") + echo "Processing Docker Compose deployment logs..." + for service_name in "${DOCKER_SERVICES_TO_BE_LOGGED[@]}"; do + archive_service_logs "$service_name" "docker" + done + cleanup_old_logs "logs_docker_*.zip" + ls -la "$LOG_DIR"/logs_docker_*.zip 2>/dev/null || echo "No Docker log zip files found." + ;; + "k8sHosts") + echo "Processing Kubernetes deployment logs..." + for service_name in "${K8S_SERVICES_TO_BE_LOGGED[@]}"; do + archive_service_logs "$service_name" "k8s" + done + cleanup_old_logs "logs_k8s_*.zip" + ls -la "$LOG_DIR"/logs_k8s_*.zip 2>/dev/null || echo "No Kubernetes log zip files found." + ;; + *) + echo "Error: Unknown deployment type '$deploymentType'. 
Supported types: 'dockerHosts', 'k8sHosts'" + exit 1 + ;; +esac \ No newline at end of file diff --git a/k6-tests/ncmp/common/produce-avc-event.js b/k6-tests/ncmp/common/produce-avc-event.js index 4ddbc43d14..34efa76b8f 100644 --- a/k6-tests/ncmp/common/produce-avc-event.js +++ b/k6-tests/ncmp/common/produce-avc-event.js @@ -20,12 +20,13 @@ import {check} from 'k6'; import {Writer, SchemaRegistry, SCHEMA_TYPE_STRING} from 'k6/x/kafka'; +import {KAFKA_BOOTSTRAP_SERVERS} from './utils.js'; const testEventPayload = JSON.stringify(JSON.parse(open('../../resources/sampleAvcInputEvent.json'))); const schemaRegistry = new SchemaRegistry(); const kafkaProducer = new Writer({ - brokers: ['localhost:9092'], + brokers: [KAFKA_BOOTSTRAP_SERVERS], topic: 'dmi-cm-events', autoCreateTopic: true, batchSize: 5000, diff --git a/k6-tests/ncmp/common/utils.js b/k6-tests/ncmp/common/utils.js index dd1f1b3780..8691a3710c 100644 --- a/k6-tests/ncmp/common/utils.js +++ b/k6-tests/ncmp/common/utils.js @@ -24,12 +24,13 @@ import {check} from 'k6'; import {Trend} from 'k6/metrics'; export const TEST_PROFILE = __ENV.TEST_PROFILE ? __ENV.TEST_PROFILE : 'kpi' +export const HOST_TYPE = __ENV.DEPLOYMENT_TYPE ? 
__ENV.DEPLOYMENT_TYPE : 'dockerHosts' export const testConfig = JSON.parse(open(`../config/${TEST_PROFILE}.json`)); export const scenarioMetaData = JSON.parse(open(`../config/scenario-metadata.json`)); -export const KAFKA_BOOTSTRAP_SERVERS = testConfig.hosts.kafkaBootstrapServer; -export const NCMP_BASE_URL = testConfig.hosts.ncmpBaseUrl; -export const DMI_PLUGIN_URL = testConfig.hosts.dmiStubUrl; -export const CONTAINER_COOL_DOWW_TIME_IN_SECONDS = testConfig.hosts.containerCoolDownTimeInSeconds; +export const KAFKA_BOOTSTRAP_SERVERS = testConfig[`${HOST_TYPE}`].kafkaBootstrapServer; +export const NCMP_BASE_URL = testConfig[`${HOST_TYPE}`].ncmpBaseUrl; +export const DMI_PLUGIN_URL = testConfig[`${HOST_TYPE}`].dmiStubUrl; +export const CONTAINER_COOL_DOWW_TIME_IN_SECONDS = testConfig[`${HOST_TYPE}`].containerCoolDownTimeInSeconds || 10; export const LEGACY_BATCH_TOPIC_NAME = 'legacy_batch_topic'; export const TOTAL_CM_HANDLES = __ENV.TOTAL_CM_HANDLES ? parseInt(__ENV.TOTAL_CM_HANDLES) : 50000; export const REGISTRATION_BATCH_SIZE = 2000; diff --git a/k6-tests/ncmp/config/endurance.json b/k6-tests/ncmp/config/endurance.json index d090603825..4583e63520 100644 --- a/k6-tests/ncmp/config/endurance.json +++ b/k6-tests/ncmp/config/endurance.json @@ -1,10 +1,15 @@ { - "hosts": { + "dockerHosts": { "ncmpBaseUrl": "http://localhost:8884", "dmiStubUrl": "http://ncmp-dmi-plugin-demo-and-csit-stub:8092", "kafkaBootstrapServer": "localhost:9093", "containerCoolDownTimeInSeconds": 420 }, + "k8sHosts": { + "ncmpBaseUrl": "http://localhost:30080", + "dmiStubUrl": "http://cps-cps-and-ncmp-dmi-stub:8092", + "kafkaBootstrapServer": "localhost:30093" + }, "scenarios": { "passthrough_read_alt_id_scenario": { "executor": "constant-vus", diff --git a/k6-tests/ncmp/config/kpi.json b/k6-tests/ncmp/config/kpi.json index 2339f4072c..37dccf8fdc 100644 --- a/k6-tests/ncmp/config/kpi.json +++ b/k6-tests/ncmp/config/kpi.json @@ -1,10 +1,15 @@ { - "hosts": { + "dockerHosts": { "ncmpBaseUrl": 
"http://localhost:8883", "dmiStubUrl": "http://ncmp-dmi-plugin-demo-and-csit-stub:8092", "kafkaBootstrapServer": "localhost:9092", "containerCoolDownTimeInSeconds": 10 }, + "k8sHosts": { + "ncmpBaseUrl": "http://localhost:30080", + "dmiStubUrl": "http://cps-cps-and-ncmp-dmi-stub:8092", + "kafkaBootstrapServer": "localhost:30093" + }, "scenarios": { "passthrough_read_alt_id_scenario": { "executor": "constant-arrival-rate", diff --git a/k6-tests/ncmp/execute-k6-scenarios.sh b/k6-tests/ncmp/execute-k6-scenarios.sh index 0bbe6be6f4..4526346f85 100755 --- a/k6-tests/ncmp/execute-k6-scenarios.sh +++ b/k6-tests/ncmp/execute-k6-scenarios.sh @@ -28,6 +28,7 @@ pushd "$(dirname "$0")" >/dev/null || { # ───────────────────────────────────────────────────────────── threshold_failures=0 testProfile=$1 +deploymentType=$2 summaryFile="${testProfile}Summary.csv" echo "Running $testProfile performance tests..." @@ -36,7 +37,7 @@ echo "Running $testProfile performance tests..." # '$?' is immediately captures the exit code after k6 finishes, # and assign it to k6_exit_code. # ───────────────────────────────────────────────────────────── -k6 run ncmp-test-runner.js --quiet -e TEST_PROFILE="$testProfile" > "$summaryFile" +k6 run ncmp-test-runner.js --quiet -e TEST_PROFILE="$testProfile" -e DEPLOYMENT_TYPE="$deploymentType" > "$summaryFile" k6_exit_code=$? 
case $k6_exit_code in diff --git a/k6-tests/ncmp/ncmp-test-runner.js b/k6-tests/ncmp/ncmp-test-runner.js index a225cf7e45..0cef1ffc08 100644 --- a/k6-tests/ncmp/ncmp-test-runner.js +++ b/k6-tests/ncmp/ncmp-test-runner.js @@ -103,7 +103,6 @@ export function teardown() { const totalDeregistrationTimeInSeconds = (endTimeInMillis - startTimeInMillis) / 1000.0; kpiTrendDeclarations.cm_handles_deleted.add(numberOfDeregisteredCmHandles / totalDeregistrationTimeInSeconds); - sleep(CONTAINER_COOL_DOWW_TIME_IN_SECONDS); } diff --git a/k6-tests/teardown.sh b/k6-tests/teardown.sh index a51ede2de0..254c91fa40 100755 --- a/k6-tests/teardown.sh +++ b/k6-tests/teardown.sh @@ -15,15 +15,25 @@ # limitations under the License. # -echo '================================== docker info ==========================' -docker ps -a +# The default test profile is kpi, and deployment type is dockerHosts +testProfile=${1:-kpi} +deploymentType=${2:-dockerHosts} -# Zip and store logs for the containers -chmod +x make-logs.sh -./make-logs.sh +# Function to create and store logs +make_logs() { + local deployment_type=$1 + echo "Creating logs for deployment type: $deployment_type" + chmod +x make-logs.sh + ./make-logs.sh "$deployment_type" +} -testProfile=$1 -docker_compose_shutdown_cmd="docker-compose -f ../docker-compose/docker-compose.yml --project-name $testProfile down --volumes" +# Function to clean Docker images based on CLEAN_DOCKER_IMAGES environment variable +clean_docker_images_if_needed() { + if [[ "${CLEAN_DOCKER_IMAGES:-0}" -eq 1 ]]; then + echo "Also cleaning up all CPS images" + remove_cps_images + fi +} # All CPS docker images: # nexus3.onap.org:10003/onap/cps-and-ncmp:latest @@ -46,15 +56,50 @@ remove_cps_images() { done } -# Check env. variable CLEAN_DOCKER_IMAGES=1 to decide removing CPS images -echo "Stopping, Removing containers and volumes for $testProfile tests..." 
-if [[ "${CLEAN_DOCKER_IMAGES:-0}" -eq 1 ]]; then - # down the compose stack, then purge any remaining CPS images, - # regardless of any test profile! - eval "$docker_compose_shutdown_cmd" - echo "Also cleaning up all CPS images" - remove_cps_images -else - # for local test operations +# Function to teardown docker-compose deployment +teardown_docker_deployment() { + echo '================================== docker info ==========================' + docker ps -a + + # Zip and store logs for the containers + make_logs "dockerHosts" + + local docker_compose_shutdown_cmd="docker-compose -f ../docker-compose/docker-compose.yml --project-name $testProfile down --volumes" + + # Check env. variable CLEAN_DOCKER_IMAGES=1 to decide removing CPS images + echo "Stopping, Removing containers and volumes for $testProfile tests..." eval "$docker_compose_shutdown_cmd" -fi + + # Clean Docker images if requested + clean_docker_images_if_needed +} + +# Function to teardown kubernetes deployment +teardown_k8s_deployment() { + echo '================================== k8s info ==========================' + kubectl get all -l app=cps-and-ncmp + + # Zip and store logs for the containers + make_logs "k8sHosts" + + echo '================================== uninstalling cps... ==========================' + helm uninstall cps + + # Clean Docker images if requested + clean_docker_images_if_needed +} + +# Main logic: determine which deployment type to teardown +case "$deploymentType" in + "k8sHosts") + teardown_k8s_deployment + ;; + "dockerHosts") + teardown_docker_deployment + ;; + *) + echo "Unknown deployment type: $deploymentType" + echo "Supported deployment types: k8sHosts, dockerHosts" + exit 1 + ;; +esac \ No newline at end of file -- 2.16.6