Merge "Modify INFO.yaml for committer rights"
author Toine Siebelink <toine.siebelink@est.tech>
Thu, 28 Sep 2023 08:10:18 +0000 (08:10 +0000)
committer Gerrit Code Review <gerrit@onap.org>
Thu, 28 Sep 2023 08:10:18 +0000 (08:10 +0000)
cps-application/src/main/resources/application.yml
cps-dependencies/pom.xml
cps-ncmp-service/src/main/java/org/onap/cps/ncmp/api/impl/config/embeddedcache/SynchronizationCacheConfig.java
cps-ncmp-service/src/main/java/org/onap/cps/ncmp/api/impl/inventory/sync/ModuleSyncWatchdog.java
cps-ncmp-service/src/test/groovy/org/onap/cps/ncmp/api/impl/config/embeddedcache/SynchronizationCacheConfigSpec.groovy
cps-ncmp-service/src/test/groovy/org/onap/cps/ncmp/api/inventory/sync/ModuleSyncWatchdogSpec.groovy
cps-parent/pom.xml
cps-ri/src/main/resources/hibernate.cfg.xml
docs/deployment.rst
integration-test/src/test/groovy/org/onap/cps/integration/performance/ncmp/CmDataSubscriptionsPerfTest.groovy
integration-test/src/test/resources/hibernate.cfg.xml

index 0163568..7beab2e 100644 (file)
@@ -37,9 +37,8 @@ spring:
         ddl-auto: create
         open-in-view: false
         properties:
-            hibernate:
-                enable_lazy_load_no_trans: true
-                dialect: org.hibernate.dialect.PostgreSQLDialect
+            hibernate.enable_lazy_load_no_trans: true
+            hibernate.dialect: org.hibernate.dialect.PostgreSQLDialect
 
     datasource:
         url: jdbc:postgresql://${DB_HOST}:${DB_PORT:5432}/cpsdb
index 16f76b9..f6931c3 100755 (executable)
@@ -78,7 +78,7 @@
             <dependency>
                 <groupId>org.springframework.boot</groupId>
                 <artifactId>spring-boot-dependencies</artifactId>
-                <version>3.0.0</version>
+                <version>3.1.2</version>
                 <type>pom</type>
                 <scope>import</scope>
             </dependency>
             <dependency>
                 <groupId>io.springfox</groupId>
                 <artifactId>springfox-boot-starter</artifactId>
-                <version>3.0.0</version>
+                <version>3.1.2</version>
             </dependency>
             <dependency>
                 <groupId>com.google.code.gson</groupId>
             <dependency>
                 <groupId>com.fasterxml.jackson.core</groupId>
                 <artifactId>jackson-databind</artifactId>
-                <version>2.14.0</version>
+                <version>2.15.2</version>
             </dependency>
             <dependency>
                 <groupId>org.eclipse.jetty</groupId>
index 62a380c..8b28717 100644 (file)
@@ -23,6 +23,7 @@ package org.onap.cps.ncmp.api.impl.config.embeddedcache;
 import com.hazelcast.config.MapConfig;
 import com.hazelcast.config.QueueConfig;
 import com.hazelcast.map.IMap;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import lombok.extern.slf4j.Slf4j;
 import org.onap.cps.cache.HazelcastCacheConfig;
@@ -44,6 +45,8 @@ public class SynchronizationCacheConfig extends HazelcastCacheConfig {
     private static final MapConfig moduleSyncStartedConfig = createMapConfig("moduleSyncStartedConfig");
     private static final MapConfig dataSyncSemaphoresConfig = createMapConfig("dataSyncSemaphoresConfig");
 
+    private static final MapConfig moduleSetTagCacheMapConfig = createMapConfig("moduleSetTagCacheMapConfig");
+
     /**
      * Module Sync Distributed Queue Instance.
      *
@@ -74,4 +77,15 @@ public class SynchronizationCacheConfig extends HazelcastCacheConfig {
     public IMap<String, Boolean> dataSyncSemaphores() {
         return createHazelcastInstance("dataSyncSemaphores", dataSyncSemaphoresConfig).getMap("dataSyncSemaphores");
     }
+
+    /**
+     * IMap instance for cached ModulesSetTags.
+     *
+     * @return configured map of ModuleSetTags
+     */
+    @Bean
+    public IMap<String, Set<String>> moduleSetTagCache() {
+        return createHazelcastInstance("moduleSetTags", moduleSetTagCacheMapConfig)
+                .getMap("moduleSetTagCache");
+    }
 }
index 916fafd..6ba52ee 100644 (file)
@@ -25,6 +25,7 @@ import com.hazelcast.map.IMap;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -48,6 +49,7 @@ public class ModuleSyncWatchdog {
     private final IMap<String, Object> moduleSyncStartedOnCmHandles;
     private final ModuleSyncTasks moduleSyncTasks;
     private final AsyncTaskExecutor asyncTaskExecutor;
+    private final IMap<String, Set<String>> moduleSetTagCache;
     private static final int MODULE_SYNC_BATCH_SIZE = 100;
     private static final long PREVENT_CPU_BURN_WAIT_TIME_MILLIS = 10;
     private static final String VALUE_FOR_HAZELCAST_IN_PROGRESS_MAP = "Started";
index c0fc18a..2fa9606 100644 (file)
@@ -44,6 +44,9 @@ class SynchronizationCacheConfigSpec extends Specification {
     @Autowired
     private IMap<String, Boolean> dataSyncSemaphores
 
+    @Autowired
+    private IMap<String, Set<String>> moduleSetTagCache
+
     def 'Embedded (hazelcast) Caches for Module and Data Sync.'() {
         expect: 'system is able to create an instance of the Module Sync Work Queue'
             assert null != moduleSyncWorkQueue
@@ -51,10 +54,12 @@ class SynchronizationCacheConfigSpec extends Specification {
             assert null != moduleSyncStartedOnCmHandles
         and: 'system is able to create an instance of a map to hold data sync semaphores'
             assert null != dataSyncSemaphores
-        and: 'there are at least 3 instances'
-            assert Hazelcast.allHazelcastInstances.size() > 2
+        and: 'system is able to create an instance of a map to hold module set tags'
+            assert null != moduleSetTagCache
+        and: 'there are at least 4 instances'
+            assert Hazelcast.allHazelcastInstances.size() > 3
         and: 'they have the correct names (in any order)'
-            assert Hazelcast.allHazelcastInstances.name.containsAll('moduleSyncWorkQueue', 'moduleSyncStartedOnCmHandles', 'dataSyncSemaphores' )
+            assert Hazelcast.allHazelcastInstances.name.containsAll('moduleSyncWorkQueue', 'moduleSyncStartedOnCmHandles', 'dataSyncSemaphores', 'moduleSetTags')
     }
 
     def 'Verify configs for Distributed objects'(){
@@ -67,6 +72,9 @@ class SynchronizationCacheConfigSpec extends Specification {
         and: 'the Data Sync Semaphores Map config'
             def dataSyncSemaphoresConfig =  Hazelcast.getHazelcastInstanceByName('dataSyncSemaphores').config
             def dataSyncSemaphoresMapConfig =  dataSyncSemaphoresConfig.mapConfigs.get('dataSyncSemaphoresConfig')
+        and: 'the Module Set Tag Map config'
+            def moduleSetTagCacheConfig =  Hazelcast.getHazelcastInstanceByName('moduleSetTags').config
+            def moduleSetTagMapConfig =  moduleSetTagCacheConfig.mapConfigs.get('moduleSetTagCacheMapConfig')
         expect: 'system created instance with correct config of Module Sync Work Queue'
             assert moduleSyncDefaultWorkQueueConfig.backupCount == 3
             assert moduleSyncDefaultWorkQueueConfig.asyncBackupCount == 3
@@ -76,11 +84,15 @@ class SynchronizationCacheConfigSpec extends Specification {
         and: 'Data Sync Semaphore Map has the correct settings'
             assert dataSyncSemaphoresMapConfig.backupCount == 3
             assert dataSyncSemaphoresMapConfig.asyncBackupCount == 3
+        and: 'Module Set Tag Map has the correct settings'
+            assert moduleSetTagMapConfig.backupCount == 3
+            assert moduleSetTagMapConfig.asyncBackupCount == 3
         and: 'all instances are part of same cluster'
             def testClusterName = 'cps-and-ncmp-test-caches'
             assert moduleSyncWorkQueueConfig.clusterName == testClusterName
             assert moduleSyncStartedOnCmHandlesConfig.clusterName == testClusterName
             assert dataSyncSemaphoresConfig.clusterName == testClusterName
+            assert moduleSetTagCacheConfig.clusterName == testClusterName
     }
 
     def 'Verify deployment network configs for Distributed objects'() {
@@ -90,6 +102,8 @@ class SynchronizationCacheConfigSpec extends Specification {
             def moduleSyncStartedOnCmHandlesNetworkConfig = Hazelcast.getHazelcastInstanceByName('moduleSyncStartedOnCmHandles').config.networkConfig
         and: 'the Data Sync Semaphores Map config'
             def dataSyncSemaphoresNetworkConfig = Hazelcast.getHazelcastInstanceByName('dataSyncSemaphores').config.networkConfig
+        and: 'the Module Set Tag Map config'
+            def moduleSetTagNetworkConfig = Hazelcast.getHazelcastInstanceByName('moduleSetTags').config.networkConfig
         expect: 'system created instance with correct config of Module Sync Work Queue'
             assert queueNetworkConfig.join.autoDetectionConfig.enabled
             assert !queueNetworkConfig.join.kubernetesConfig.enabled
@@ -99,7 +113,9 @@ class SynchronizationCacheConfigSpec extends Specification {
         and: 'Data Sync Semaphore Map has the correct settings'
             assert dataSyncSemaphoresNetworkConfig.join.autoDetectionConfig.enabled
             assert !dataSyncSemaphoresNetworkConfig.join.kubernetesConfig.enabled
-
+        and: 'Module Set Tag Map has the correct settings'
+            assert moduleSetTagNetworkConfig.join.autoDetectionConfig.enabled
+            assert !moduleSetTagNetworkConfig.join.kubernetesConfig.enabled
     }
 
     def 'Verify network config'() {
@@ -135,6 +151,15 @@ class SynchronizationCacheConfigSpec extends Specification {
             waitMax2SecondsForKeyExpiration(dataSyncSemaphores, 'testKeyDataSync')
     }
 
+    def 'Time to Live Verify for Module Set Tag'() {
+        when: 'the key is inserted with a TTL of 1 second'
+            moduleSetTagCache.put('testKeyModuleSetTag', ['module-set-tag'] as Set, 1, TimeUnit.SECONDS)
+        then: 'the entry is present in the map'
+            assert moduleSetTagCache.get('testKeyModuleSetTag') != null
+        and: 'the entry expires in less than 2 seconds'
+            waitMax2SecondsForKeyExpiration(moduleSetTagCache, 'testKeyModuleSetTag')
+    }
+
     def waitMax2SecondsForKeyExpiration(map, key) {
         def count = 0
         while ( map.get(key)!=null && ++count <= 20 ) {
index 94ee6ea..d85686a 100644 (file)
@@ -45,7 +45,9 @@ class ModuleSyncWatchdogSpec extends Specification {
 
     def spiedAsyncTaskExecutor = Spy(AsyncTaskExecutor)
 
-    def objectUnderTest = new ModuleSyncWatchdog(mockSyncUtils, moduleSyncWorkQueue , mockModuleSyncStartedOnCmHandles, mockModuleSyncTasks, spiedAsyncTaskExecutor)
+    def moduleSetTagCache = Mock(IMap<String, Set<String>>)
+
+    def objectUnderTest = new ModuleSyncWatchdog(mockSyncUtils, moduleSyncWorkQueue , mockModuleSyncStartedOnCmHandles, mockModuleSyncTasks, spiedAsyncTaskExecutor, moduleSetTagCache)
 
     void setup() {
         spiedAsyncTaskExecutor.setupThreadPool()
index 248bc28..4121556 100755 (executable)
                 <plugin>
                     <groupId>org.springframework.boot</groupId>
                     <artifactId>spring-boot-maven-plugin</artifactId>
-                    <version>3.0.0</version>
+                    <version>3.1.2</version>
                     <executions>
                         <execution>
                             <goals>
index 98e6cfc..1b822b9 100644 (file)
@@ -9,7 +9,7 @@
         <property name="hibernate.connection.url">jdbc:postgresql://${DB_HOST}:${DB_PORT:5432}/cpsdb</property>
         <property name="hibernate.connection.username">${DB_USERNAME}</property>
         <property name="hibernate.connection.password">${DB_PASSWORD}</property>
-        <property name="hibernate.dialect">org.hibernate.dialect.PostgreSQL82Dialect</property>
+        <property name="hibernate.dialect">org.hibernate.dialect.PostgreSQLDialect</property>
         <property name="show_sql">true</property>
         <property name="hibernate.hbm2ddl.auto">update</property>
     </session-factory>
index acc32e3..0642e6a 100644 (file)
@@ -336,5 +336,7 @@ Below are the list of distributed datastructures that we have.
 +--------------+---------------------------------+----------------------------------------------------------+
 | cps-ncmp     | trustLevelPerDmiPlugin          | Stores the TrustLevel for the dmi-plugins.               |
 +--------------+---------------------------------+----------------------------------------------------------+
+| cps-ncmp     | moduleSetTagCacheMapConfig      | Stores the Module Set Tags for cmHandles.                |
++--------------+---------------------------------+----------------------------------------------------------+
 
-Total number of caches : 7
\ No newline at end of file
+Total number of caches : 8
\ No newline at end of file
index 7e7dedf..cf5c3f6 100644 (file)
@@ -55,47 +55,63 @@ class CmDataSubscriptionsPerfTest extends NcmpPerfTestBase {
             recordAndAssertPerformance("Query all subscribers", 1_000, durationInMillis)
     }
 
-    def 'Worst case new subscription (200x10 new entries).'() {
-        given: 'a new subscription with non-matching data'
-            def subscribers = createLeafList('subscribers',1, subscriberIdPrefix)
-            def filters = '"filters":' + createJsonArray('filter',numberOfFiltersPerCmHandle,'xpath','other_' + xpathPrefix,subscribers)
-            def cmHandles = createJsonArray('cm-handle',numberOfCmHandlesPerCmDataSubscription,'id','other' + cmHandlePrefix, filters)
-        when: 'Insert a new subscription'
-            stopWatch.start()
-            cpsDataService.saveData(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, xPathForDataStore1CmHandles, cmHandles, now)
-            stopWatch.stop()
-            def durationInMillis = stopWatch.getTotalTimeMillis()
-        then: 'insert new subscription with 1 second'
-            recordAndAssertPerformance("Insert new subscription", 1_000, durationInMillis)
-    }
-
     def 'Worst case subscription update (200x10 matching entries).'() {
         given: 'all filters are queried'
             def cpsPath = '//filter'
             def result = objectUnderTest.queryDataNodes(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, cpsPath, INCLUDE_ALL_DESCENDANTS)
+        and: 'there are the expected number of subscribers per subscription'
+            assert result.collect {it.leaves.subscribers.size()}.sum() == totalNumberOfEntries * numberOfCmDataSubscribers
         and: 'find all entries for an existing subscriptions'
             def matches = querySubscriptionsByIteration(result, 1)
-        when: 'Update all subscriptions found'
+        when: 'update all subscriptions found'
             stopWatch.start()
-            /* the production code version of this should manipulate the original subscribersAsArray of course
-               but for the (performance) poc creating another array with one extra element suffices
-             */
-            def jsonPerPath = [:]
-            matches.each { xpath, subscribersAsArray ->
+            HashMap<String, List<String>> filterEntriesPerPath = [:]
+            matches.each { dataNode, subscribersAsArray ->
                 def updatedSubscribers = createLeafList('subscribers', 1 + numberOfCmDataSubscribers, subscriberIdPrefix)
-                def filterEntry = '{"filter": {"xpath":"' + xpath + '", ' + updatedSubscribers + ' } }'
-                def parentPath = xpath.toString().substring(0, xpath.toString().indexOf('/filter[@xpath='))
-                jsonPerPath.put(parentPath, filterEntry)
+                def filterEntry = '{"xpath":"' + dataNode.leaves.xpath + '", ' + updatedSubscribers + ' }'
+                def parentPath = dataNode.xpath.toString().substring(0, dataNode.xpath.toString().indexOf('/filter[@xpath='))
+                filterEntriesPerPath.putIfAbsent(parentPath, new ArrayList<String>())
+                filterEntriesPerPath.get(parentPath).add(filterEntry)
+            }
+            HashMap<String, String> jsonPerPath = [:]
+            filterEntriesPerPath.each { parentPath, filterEntries ->
+                jsonPerPath.put(parentPath, '{"filter": [' + filterEntries.join(',') + ']}')
             }
-            cpsDataService.updateDataNodesAndDescendants(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, jsonPerPath, now)
+
+            // NOTE Below fails as updateDataNodesAndDescendants can't handle JSON lists!
+            // cpsDataService.updateDataNodesAndDescendants(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, jsonPerPath, now)
+
+            // So update for each CM-handle instead:
+            jsonPerPath.each { parentPath, json ->
+                // Around 8.5 seconds for long strings, 4.8 with short strings
+                // cpsDataService.updateDataNodeAndDescendants(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, parentPath, json, now)
+                // Around 6.5 seconds for long strings, 3.3 seconds with short strings
+                cpsDataService.updateNodeLeaves(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, parentPath, json, now)
+            }
+
             stopWatch.stop()
             def durationInMillis = stopWatch.getTotalTimeMillis()
-        then: 'Update matching subscription within 8 seconds'
-            //TODO Toine check with Daniel if this can be optimized quickly without really changing production code
-            // ie is there a better way of doing these 2,000 updates
+        then: 'a subscriber has been added to each filter entry'
+            def resultAfter = objectUnderTest.queryDataNodes(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, cpsPath, INCLUDE_ALL_DESCENDANTS)
+            assert resultAfter.collect {it.leaves.subscribers.size()}.sum() == totalNumberOfEntries * (1 + numberOfCmDataSubscribers)
+        and: 'update matching subscription within 8 seconds'
             recordAndAssertPerformance("Update matching subscription", 8_000, durationInMillis)
     }
 
+    def 'Worst case new subscription (200x10 new entries).'() {
+        given: 'a new subscription with non-matching data'
+            def subscribers = createLeafList('subscribers',1, subscriberIdPrefix)
+            def filters = '"filters":' + createJsonArray('filter',numberOfFiltersPerCmHandle,'xpath','other_' + xpathPrefix,subscribers)
+            def cmHandles = createJsonArray('cm-handle',numberOfCmHandlesPerCmDataSubscription,'id','other' + cmHandlePrefix, filters)
+        when: 'Insert a new subscription'
+            stopWatch.start()
+            cpsDataService.saveData(NCMP_PERFORMANCE_TEST_DATASPACE, CM_DATA_SUBSCRIPTIONS_ANCHOR, xPathForDataStore1CmHandles, cmHandles, now)
+            stopWatch.stop()
+            def durationInMillis = stopWatch.getTotalTimeMillis()
+        then: 'insert new subscription within 1 second'
+            recordAndAssertPerformance("Insert new subscription", 1_000, durationInMillis)
+    }
+
     def querySubscriptionsByIteration(Collection<DataNode> allSubscriptionsAsDataNodes, targetSubscriptionSequenceNumber) {
         def matches = [:]
         allSubscriptionsAsDataNodes.each {
@@ -104,7 +120,7 @@ class CmDataSubscriptionsPerfTest extends NcmpPerfTestBase {
             def targetSubscriptionId = subscriberIdPrefix + '-' + ( targetSubscriptionSequenceNumber > 0 ? targetSubscriptionSequenceNumber
                                                                                                      : 1 + random.nextInt(numberOfCmDataSubscribers) )
             if (subscribersAsSet.contains(targetSubscriptionId)) {
-                matches.put(it.xpath, subscribersAsArray)
+                matches.put(it, subscribersAsArray)
             }
         }
         return matches
index 513c00a..8d5139b 100644 (file)
@@ -9,7 +9,7 @@
         <property name="hibernate.connection.url">${DB_URL}</property>
         <property name="hibernate.connection.username">${DB_USERNAME}</property>
         <property name="hibernate.connection.password">${DB_PASSWORD}</property>
-        <property name="hibernate.dialect">org.hibernate.dialect.PostgreSQL82Dialect</property>
+        <property name="hibernate.dialect">org.hibernate.dialect.PostgreSQLDialect</property>
         <property name="show_sql">true</property>
         <property name="hibernate.hbm2ddl.auto">none</property>
     </session-factory>