/*-
 * ============LICENSE_START=======================================================
 * ================================================================================
 * Copyright (C) 2018 AT&T Intellectual Property. All rights reserved.
 * ================================================================================
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * ============LICENSE_END=========================================================
 */
 
package org.onap.ccsdk.sli.plugins.grtoolkit;

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import javax.annotation.Nonnull;
import org.apache.commons.lang.StringUtils;
import org.json.JSONArray;
import org.json.JSONObject;
import org.onap.ccsdk.sli.core.dblib.DbLibService;
import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionManager;
import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionResponse;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.MemberBuilder;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.PropertyKeys;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver;
import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SingleNodeHealthResolver;
import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SixNodeHealthResolver;
import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.ThreeNodeHealthResolver;
import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
import org.opendaylight.mdsal.binding.api.DataBroker;
import org.opendaylight.mdsal.binding.api.DataTreeChangeListener;
import org.opendaylight.mdsal.binding.api.NotificationPublishService;
import org.opendaylight.mdsal.binding.api.RpcProviderService;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.AdminHealthInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.AdminHealthOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.AdminHealthOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ClusterHealthInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ClusterHealthOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ClusterHealthOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.DatabaseHealthInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.DatabaseHealthOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.DatabaseHealthOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.GrToolkitService;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.HaltAkkaTrafficInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.HaltAkkaTrafficOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.HaltAkkaTrafficOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.Member;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ResumeAkkaTrafficInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ResumeAkkaTrafficOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ResumeAkkaTrafficOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.Site;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteHealthInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteHealthOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteHealthOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteIdentifierInput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteIdentifierOutput;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteIdentifierOutputBuilder;
import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.site.health.output.SitesBuilder;
import org.opendaylight.yangtools.concepts.ObjectRegistration;
import org.opendaylight.yangtools.yang.common.RpcResult;
import org.opendaylight.yangtools.yang.common.RpcResultBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
 
/**
 * API implementation of the {@code GrToolkitService} interface generated from
 * the gr-toolkit.yang model. The RPCs contained within this class are meant to
 * run in an architecture agnostic fashion, where the response is repeatable
 * and predictable across any given node configuration. To facilitate this,
 * health checking and failover logic has been abstracted into the
 * {@code HealthResolver} classes.
 * <p>
 * Anyone who wishes to write a custom resolver for use with GR Toolkit should
 * extend the {@code HealthResolver} class. The currently provided resolvers
 * are useful references for further implementation.
 *
 * @author Anthony Haddox
 * @see GrToolkitService
 * @see HealthResolver
 * @see SingleNodeHealthResolver
 * @see ThreeNodeHealthResolver
 * @see SixNodeHealthResolver
 */
public class GrToolkitProvider implements AutoCloseable, GrToolkitService, DataTreeChangeListener {
 
    private static final String APP_NAME = "gr-toolkit";
    private static final String PROPERTIES_FILE = System.getenv("SDNC_CONFIG_DIR") + "/gr-toolkit.properties";
    private String akkaConfig;
    private String httpProtocol;
    private String siteIdentifier = System.getenv("SITE_NAME");
    private final Logger log = LoggerFactory.getLogger(GrToolkitProvider.class);
    private final ExecutorService executor;
    protected DataBroker dataBroker;
    protected NotificationPublishService notificationService;
    protected RpcProviderService rpcRegistry;
    protected ObjectRegistration<GrToolkitService> rpcRegistration;
    protected DbLibService dbLib;
    private String member;
    private ClusterActor self;
    private HashMap<String, ClusterActor> memberMap;
    private Properties properties;
    private DistributedDataStoreInterface configDatastore;
    private HealthResolver resolver;
 
    /**
     * Constructs the provider for the GR Toolkit API. Dependencies are
     * injected using the GrToolkit.xml blueprint.
     *
     * @param dataBroker The Data Broker
     * @param notificationProviderService The Notification Service
     * @param rpcProviderRegistry The RPC Registry
     * @param configDatastore The Configuration Data Store provided by the controller
     * @param dbLibService Reference to the controller provided DbLibService
     */
    public GrToolkitProvider(DataBroker dataBroker,
                             NotificationPublishService notificationProviderService,
                             RpcProviderService rpcProviderRegistry,
                             DistributedDataStoreInterface configDatastore,
                             DbLibService dbLibService) {
        log.info("Creating provider for {}", APP_NAME);
        this.executor = Executors.newFixedThreadPool(1);
        this.dataBroker = dataBroker;
        this.notificationService = notificationProviderService;
        this.rpcRegistry = rpcProviderRegistry;
        this.configDatastore = configDatastore;
        this.dbLib = dbLibService;
        initialize();
    }

    /**
     * Initializes some structures necessary to hold health check information
     * and perform failovers.
     */
    private void initialize() {
        log.info("Initializing provider for {}", APP_NAME);
        setProperties();
        defineMembers();
        createContainers();
        rpcRegistration = rpcRegistry.registerRpcImplementation(GrToolkitService.class, this);
        log.info("Initialization complete for {}", APP_NAME);
    }
 
    /**
     * Creates the {@code Properties} object with the contents of
     * gr-toolkit.properties, found in the {@code SDNC_CONFIG_DIR} directory,
     * which should be set as an environment variable. If the properties file
     * is not found, GR Toolkit will not function.
     */
    private void setProperties() {
        log.info("Loading properties from {}", PROPERTIES_FILE);
        properties = new Properties();
        File propertiesFile = new File(PROPERTIES_FILE);
        if(!propertiesFile.exists()) {
            log.warn("setProperties(): Properties file not found.");
        } else {
            try(FileInputStream fileInputStream = new FileInputStream(propertiesFile)) {
                properties.load(fileInputStream);
                if(!properties.containsKey(PropertyKeys.SITE_IDENTIFIER)) {
                    properties.put(PropertyKeys.SITE_IDENTIFIER, "Unknown Site");
                }
                httpProtocol = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? "https://" : "http://";
                akkaConfig = properties.getProperty(PropertyKeys.AKKA_CONF_LOCATION).trim();
                if(StringUtils.isEmpty(siteIdentifier)) {
                    siteIdentifier = properties.getProperty(PropertyKeys.SITE_IDENTIFIER).trim();
                }
                log.info("setProperties(): Loaded properties.");
            } catch(IOException e) {
                log.error("setProperties(): Error loading properties.", e);
            }
        }
    }
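    // Hedged sketch of a gr-toolkit.properties file. The literal key strings are defined by the
    // PropertyKeys constants used by this class and are not reproduced here; the angle-bracket
    // placeholders stand for whatever those constants resolve to, and the values are assumed:
    //   <SITE_IDENTIFIER>      = Site-1
    //   <CONTROLLER_USE_SSL>   = true            (compared against "true" to select https://)
    //   <AKKA_CONF_LOCATION>   = /opt/opendaylight/configuration/initial/akka.conf
    //   <CONTROLLER_PORT_AKKA> = 2550
    //   <RESOLVER>             = fully qualified name of a HealthResolver implementation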
 
    /**
     * Parses the akka.conf file used by the controller to define an akka
     * cluster. This method requires the <i>seed-nodes</i> definition to exist
     * on a single line.
     */
    private void defineMembers() {
        member = configDatastore.getActorUtils().getCurrentMemberName().getName();
        log.info("defineMembers(): Cluster member: {}", member);

        log.info("defineMembers(): Parsing akka.conf for cluster memberMap...");
        try {
            File akkaConfigFile = new File(this.akkaConfig);
            try(FileReader fileReader = new FileReader(akkaConfigFile);
                BufferedReader bufferedReader = new BufferedReader(fileReader)) {
                String line;
                while((line = bufferedReader.readLine()) != null) {
                    if(line.contains("seed-nodes =")) {
                        parseSeedNodes(line);
                        break;
                    }
                }
            }
        } catch(IOException e) {
            log.error("defineMembers(): Couldn't load akka", e);
        } catch(NullPointerException e) {
            log.error("defineMembers(): akkaConfig is null. Check properties file and restart {} bundle.", APP_NAME);
            log.error("defineMembers(): NullPointerException", e);
        }
        log.info("self:\n{}", self);
    }
 
    /**
     * Sets up the {@code InstanceIdentifier}s for Data Store transactions.
     */
    private void createContainers() {
        // Replace with MD-SAL write for FailoverStatus
    }

    /**
     * Shuts down the {@code ExecutorService} and closes the RPC Provider Registry.
     */
    @Override
    public void close() throws Exception {
        log.info("Closing provider for {}", APP_NAME);
        executor.shutdown();
        rpcRegistration.close();
        log.info("close(): Successfully closed provider for {}", APP_NAME);
    }
 
    /**
     * Listens for changes to the Data tree.
     *
     * @param changes Data tree changes.
     */
    @Override
    public void onDataTreeChanged(@Nonnull Collection changes) {
        log.info("onDataTreeChanged(): No changes.");
    }
 
    /**
     * Makes a call to {@code resolver.getClusterHealth()} to determine the
     * health of the akka clustered controllers.
     *
     * @param input request body adhering to the model for
     *        {@code ClusterHealthInput}
     * @return response adhering to the model for {@code ClusterHealthOutput}
     * @see HealthResolver
     * @see ClusterHealthInput
     * @see ClusterHealthOutput
     */
    @Override
    public ListenableFuture<RpcResult<ClusterHealthOutput>> clusterHealth(ClusterHealthInput input) {
        log.info("{}:cluster-health invoked.", APP_NAME);
        resolver.getClusterHealth();
        return buildClusterHealthOutput();
    }
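    // Usage sketch (assumed host and port, not part of the original source): following the
    // RESTCONF operations URL pattern this class itself builds for gr-toolkit:halt-akka-traffic,
    // this RPC would be invoked with something like:
    //   POST http(s)://<controller>:<port>/restconf/operations/gr-toolkit:cluster-health
    // The response assembled by buildClusterHealthOutput() below carries the serving member,
    // the aggregate 0/1 status string, and the per-member health list.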
 
    /**
     * Makes a call to {@code resolver.getSiteHealth()} to determine the health
     * of all of the application components of a site. In a multi-site config,
     * this will gather the health of all sites.
     *
     * @param input request body adhering to the model for
     *        {@code SiteHealthInput}
     * @return response adhering to the model for {@code SiteHealthOutput}
     * @see HealthResolver
     * @see SiteHealthInput
     * @see SiteHealthOutput
     */
    @Override
    public ListenableFuture<RpcResult<SiteHealthOutput>> siteHealth(SiteHealthInput input) {
        log.info("{}:site-health invoked.", APP_NAME);
        List<SiteHealth> sites = resolver.getSiteHealth();
        return buildSiteHealthOutput(sites);
    }
 
    /**
     * Makes a call to {@code resolver.getDatabaseHealth()} to determine the
     * health of the database(s) used by the controller.
     *
     * @param input request body adhering to the model for
     *        {@code DatabaseHealthInput}
     * @return response adhering to the model for {@code DatabaseHealthOutput}
     * @see HealthResolver
     * @see DatabaseHealthInput
     * @see DatabaseHealthOutput
     */
    @Override
    public ListenableFuture<RpcResult<DatabaseHealthOutput>> databaseHealth(DatabaseHealthInput input) {
        log.info("{}:database-health invoked.", APP_NAME);
        DatabaseHealthOutputBuilder outputBuilder = new DatabaseHealthOutputBuilder();
        DatabaseHealth health = resolver.getDatabaseHealth();
        outputBuilder.setStatus(health.getHealth().equals(Health.HEALTHY) ? "200" : "500");
        outputBuilder.setHealth(health.getHealth().toString());
        outputBuilder.setServedBy(member);
        log.info("databaseHealth(): Health: {}", health.getHealth());
        return Futures.immediateFuture(RpcResultBuilder.<DatabaseHealthOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Makes a call to {@code resolver.getAdminHealth()} to determine the
     * health of the administrative portal(s) used by the controller.
     *
     * @param input request body adhering to the model for
     *        {@code AdminHealthInput}
     * @return response adhering to the model for {@code AdminHealthOutput}
     * @see HealthResolver
     * @see AdminHealthInput
     * @see AdminHealthOutput
     */
    @Override
    public ListenableFuture<RpcResult<AdminHealthOutput>> adminHealth(AdminHealthInput input) {
        log.info("{}:admin-health invoked.", APP_NAME);
        AdminHealthOutputBuilder outputBuilder = new AdminHealthOutputBuilder();
        AdminHealth adminHealth = resolver.getAdminHealth();
        outputBuilder.setStatus(Integer.toString(adminHealth.getStatusCode()));
        outputBuilder.setHealth(adminHealth.getHealth().toString());
        outputBuilder.setServedBy(member);
        log.info("adminHealth(): Status: {} | Health: {}", adminHealth.getStatusCode(), adminHealth.getHealth());
        return Futures.immediateFuture(RpcResultBuilder.<AdminHealthOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Puts IP Tables rules in place to drop akka communications traffic with
     * one or more nodes. This method does not perform any checks to see if
     * rules currently exist, and assumes success.
     *
     * @param input request body adhering to the model for
     *        {@code HaltAkkaTrafficInput}
     * @return response adhering to the model for {@code HaltAkkaTrafficOutput}
     * @see HaltAkkaTrafficInput
     * @see HaltAkkaTrafficOutput
     */
    @Override
    public ListenableFuture<RpcResult<HaltAkkaTrafficOutput>> haltAkkaTraffic(HaltAkkaTrafficInput input) {
        log.info("{}:halt-akka-traffic invoked.", APP_NAME);
        HaltAkkaTrafficOutputBuilder outputBuilder = new HaltAkkaTrafficOutputBuilder();
        outputBuilder.setStatus("200");
        modifyIpTables(IpTables.ADD, input.getNodeInfo().toArray());
        outputBuilder.setServedBy(member);

        return Futures.immediateFuture(RpcResultBuilder.<HaltAkkaTrafficOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Removes IP Tables rules to permit akka communications traffic with one
     * or more nodes. This method does not perform any checks to see if rules
     * currently exist, and assumes success.
     *
     * @param input request body adhering to the model for
     *        {@code ResumeAkkaTrafficInput}
     * @return response adhering to the model for {@code ResumeAkkaTrafficOutput}
     * @see ResumeAkkaTrafficInput
     * @see ResumeAkkaTrafficOutput
     */
    @Override
    public ListenableFuture<RpcResult<ResumeAkkaTrafficOutput>> resumeAkkaTraffic(ResumeAkkaTrafficInput input) {
        log.info("{}:resume-akka-traffic invoked.", APP_NAME);
        ResumeAkkaTrafficOutputBuilder outputBuilder = new ResumeAkkaTrafficOutputBuilder();
        outputBuilder.setStatus("200");
        modifyIpTables(IpTables.DELETE, input.getNodeInfo().toArray());
        outputBuilder.setServedBy(member);

        return Futures.immediateFuture(RpcResultBuilder.<ResumeAkkaTrafficOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Returns a canned response containing the identifier for this site.
     *
     * @param input request body adhering to the model for
     *        {@code SiteIdentifierInput}
     * @return response adhering to the model for {@code SiteIdentifierOutput}
     * @see SiteIdentifierInput
     * @see SiteIdentifierOutput
     */
    @Override
    public ListenableFuture<RpcResult<SiteIdentifierOutput>> siteIdentifier(SiteIdentifierInput input) {
        log.info("{}:site-identifier invoked.", APP_NAME);
        SiteIdentifierOutputBuilder outputBuilder = new SiteIdentifierOutputBuilder();
        outputBuilder.setStatus("200");
        outputBuilder.setId(siteIdentifier);
        outputBuilder.setServedBy(member);
        return Futures.immediateFuture(RpcResultBuilder.<SiteIdentifierOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Makes a call to {@code resolver.tryFailover()} to try a failover defined
     * by the active {@code HealthResolver}.
     *
     * @param input request body adhering to the model for
     *        {@code FailoverInput}
     * @return response adhering to the model for {@code FailoverOutput}
     * @see HealthResolver
     * @see FailoverInput
     * @see FailoverOutput
     */
    @Override
    public ListenableFuture<RpcResult<FailoverOutput>> failover(FailoverInput input) {
        log.info("{}:failover invoked.", APP_NAME);
        FailoverOutputBuilder outputBuilder = new FailoverOutputBuilder();
        FailoverStatus failoverStatus = resolver.tryFailover(input);
        outputBuilder.setServedBy(member);
        outputBuilder.setMessage(failoverStatus.getMessage());
        outputBuilder.setStatus(Integer.toString(failoverStatus.getStatusCode()));
        log.info("{}:{}.", APP_NAME, failoverStatus.getMessage());
        return Futures.immediateFuture(RpcResultBuilder.<FailoverOutput>status(true).withResult(outputBuilder.build()).build());
    }
 
    /**
     * Performs an akka traffic isolation of the active site from the standby
     * site in an Active/Standby architecture. Invokes the
     * {@code halt-akka-traffic} RPC against the standby site nodes using the
     * information of the active site nodes.
     *
     * @param activeSite list of nodes in the active site
     * @param standbySite list of nodes in the standby site
     * @param port http or https port of the controller
     * @deprecated No longer used since the refactor to use the HealthResolver
     *             pattern. Retained so the logic can be replicated later.
     */
    @Deprecated
    private void isolateSiteFromCluster(ArrayList<ClusterActor> activeSite, ArrayList<ClusterActor> standbySite, String port) {
        log.info("isolateSiteFromCluster(): Halting Akka traffic...");
        for(ClusterActor actor : standbySite) {
            try {
                log.info("Halting Akka traffic for: {}", actor.getNode());
                // Build JSON with activeSite actor Node and actor AkkaPort
                JSONObject akkaInput = new JSONObject();
                JSONObject inputBlock = new JSONObject();
                JSONArray votingStateArray = new JSONArray();
                JSONObject nodeInfo;
                for(ClusterActor node : activeSite) {
                    nodeInfo = new JSONObject();
                    nodeInfo.put("node", node.getNode());
                    nodeInfo.put("port", node.getAkkaPort());
                    votingStateArray.put(nodeInfo);
                }
                inputBlock.put("node-info", votingStateArray);
                akkaInput.put("input", inputBlock);
                ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:halt-akka-traffic", ConnectionManager.HttpMethod.POST, akkaInput.toString(), "");
            } catch(IOException e) {
                log.error("isolateSiteFromCluster(): Could not halt Akka traffic for: " + actor.getNode(), e);
            }
        }
    }
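    // Illustration of the halt-akka-traffic request body assembled above, with assumed node
    // names and ports:
    //   { "input": { "node-info": [ { "node": "active-node-1", "port": "2550" },
    //                               { "node": "active-node-2", "port": "2550" } ] } }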
 
    /**
     * Invokes the down unreachable action through the Jolokia mbean API.
     *
     * @param activeSite list of nodes in the active site
     * @param standbySite list of nodes in the standby site
     * @param port http or https port of the controller
     * @deprecated No longer used since the refactor to use the HealthResolver
     *             pattern. Retained so the logic can be replicated later.
     */
    @Deprecated
    private void downUnreachableNodes(ArrayList<ClusterActor> activeSite, ArrayList<ClusterActor> standbySite, String port) {
        log.info("downUnreachableNodes(): Setting site unreachable...");
        JSONObject jolokiaInput = new JSONObject();
        jolokiaInput.put("type", "EXEC");
        jolokiaInput.put("mbean", "akka:type=Cluster");
        jolokiaInput.put("operation", "down");
        JSONArray arguments = new JSONArray();
        for(ClusterActor actor : activeSite) {
            // Build Jolokia input
            // May need to change from akka port to actor.getAkkaPort()
            arguments.put("akka.tcp://opendaylight-cluster-data@" + actor.getNode() + ":" + properties.getProperty(PropertyKeys.CONTROLLER_PORT_AKKA));
        }
        jolokiaInput.put("arguments", arguments);
        log.debug("downUnreachableNodes(): {}", jolokiaInput);
        try {
            log.info("downUnreachableNodes(): Setting nodes unreachable");
            ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + standbySite.get(0).getNode() + ":" + port + "/jolokia", ConnectionManager.HttpMethod.POST, jolokiaInput.toString(), "");
        } catch(IOException e) {
            log.error("downUnreachableNodes(): Error setting nodes unreachable", e);
        }
    }
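    // Illustration of the Jolokia EXEC payload assembled above, with an assumed node and port:
    //   { "type": "EXEC", "mbean": "akka:type=Cluster", "operation": "down",
    //     "arguments": [ "akka.tcp://opendaylight-cluster-data@active-node-1:2550" ] }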
 
    /**
     * Triggers a data backup and export sequence of MD-SAL data. Invokes the
     * {@code data-export-import:schedule-export} RPC to schedule a data export
     * and subsequently the {@code daexim-offsite-backup:backup-data} RPC
     * against the active site to export and backup the data. Assumes the
     * controllers have the org.onap.ccsdk.sli.northbound.daeximoffsitebackup
     * bundle installed.
     *
     * @param activeSite list of nodes in the active site
     * @param port http or https port of the controller
     * @deprecated No longer used since the refactor to use the HealthResolver
     *             pattern. Retained so the logic can be replicated later.
     */
    @Deprecated
    private void backupMdSal(ArrayList<ClusterActor> activeSite, String port) {
        log.info("backupMdSal(): Backing up data...");
        try {
            log.info("backupMdSal(): Scheduling backup for: {}", activeSite.get(0).getNode());
            ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + activeSite.get(0).getNode() + ":" + port + "/restconf/operations/data-export-import:schedule-export", ConnectionManager.HttpMethod.POST, "{ \"input\": { \"run-at\": \"30\" } }", "");
        } catch(IOException e) {
            log.error("backupMdSal(): Error backing up MD-SAL", e);
        }
        for(ClusterActor actor : activeSite) {
            try {
                log.info("backupMdSal(): Backing up data for: {}", actor.getNode());
                ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/daexim-offsite-backup:backup-data", ConnectionManager.HttpMethod.POST, null, "");
            } catch(IOException e) {
                log.error("backupMdSal(): Error backing up data.", e);
            }
        }
    }
 
    /**
     * Builds a response object for {@code clusterHealth()}. Sorts and iterates
     * over the contents of the {@code memberMap}, which contains the health
     * information of the cluster, and adds them to the {@code outputBuilder}.
     * If the ClusterActor is healthy, according to
     * {@code resolver.isControllerHealthy()}, the {@code ClusterHealthOutput}
     * status has a {@code 0} appended, otherwise a {@code 1} is appended. A
     * status of all zeroes denotes a healthy cluster. This status should be
     * easily decoded by tools which use the output.
     *
     * @return future containing a completed {@code ClusterHealthOutput}
     *
     * @see ClusterHealthOutput
     * @see HealthResolver
     */
    @SuppressWarnings("unchecked")
    private ListenableFuture<RpcResult<ClusterHealthOutput>> buildClusterHealthOutput() {
        ClusterHealthOutputBuilder outputBuilder = new ClusterHealthOutputBuilder();
        outputBuilder.setServedBy(member);
        List memberList = new ArrayList<Member>();
        StringBuilder stat = new StringBuilder();
        memberMap.values().stream()
                .sorted(Comparator.comparingInt(member -> Integer.parseInt(member.getMember().split("-")[1])))
                .forEach(member -> {
                    memberList.add(new MemberBuilder(member).build());
                    // 0 is a healthy controller, 1 is unhealthy.
                    // The list is sorted so users can decode to find unhealthy nodes
                    // This will also let them figure out health on a per-site basis
                    // Depending on any tools they use with this API
                    if(resolver.isControllerHealthy(member)) {
                        stat.append("0");
                    } else {
                        stat.append("1");
                    }
                });
        outputBuilder.setStatus(stat.toString());
        outputBuilder.setMembers(memberList);
        RpcResult<ClusterHealthOutput> rpcResult = RpcResultBuilder.<ClusterHealthOutput>status(true).withResult(outputBuilder.build()).build();
        return Futures.immediateFuture(rpcResult);
    }
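    // Worked example of the aggregate status string (member names assumed): with members
    // sorted as member-1, member-2, member-3, a status of "010" indicates member-2 failed the
    // resolver.isControllerHealthy() check while member-1 and member-3 are healthy.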
 
    /**
     * Builds a response object for {@code siteHealth()}. Iterates over a list
     * of {@code SiteHealth} objects and populates the {@code SiteHealthOutput}
     * with the information.
     *
     * @param sites list of sites
     * @return future containing a completed {@code SiteHealthOutput}
     *
     * @see HealthResolver
     */
    @SuppressWarnings("unchecked")
    private ListenableFuture<RpcResult<SiteHealthOutput>> buildSiteHealthOutput(List<SiteHealth> sites) {
        SiteHealthOutputBuilder outputBuilder = new SiteHealthOutputBuilder();
        SitesBuilder siteBuilder = new SitesBuilder();
        outputBuilder.setStatus("200");
        outputBuilder.setSites((List) new ArrayList<Site>());

        for(SiteHealth site : sites) {
            siteBuilder.setHealth(site.getHealth().toString());
            siteBuilder.setRole(site.getRole());
            siteBuilder.setId(site.getId());
            outputBuilder.getSites().add(siteBuilder.build());
            log.info("buildSiteHealthOutput(): Health for {}: {}", site.getId(), site.getHealth().getHealth());
        }

        outputBuilder.setServedBy(member);
        RpcResult<SiteHealthOutput> rpcResult = RpcResultBuilder.<SiteHealthOutput>status(true).withResult(outputBuilder.build()).build();
        return Futures.immediateFuture(rpcResult);
    }
 
    /**
     * Parses a line containing the akka networking information of the akka
     * controller cluster. Assumes entries of the format:
     * <p>
     * akka.tcp://opendaylight-cluster-data@<FQDN>:<AKKA_PORT>
     * <p>
     * The information is stored in a {@code ClusterActor} object, and then
     * added to the memberMap HashMap, with the {@code FQDN} as the key. The
     * final step is a call to {@code createHealthResolver} to create the
     * health resolver for the provider.
     *
     * @param line the line containing all of the seed nodes
     *
     * @see HealthResolver
     */
    private void parseSeedNodes(String line) {
        memberMap = new HashMap<>();
        line = line.substring(line.indexOf("[\""), line.indexOf(']'));
        String[] splits = line.split(",");

        for(int ndx = 0; ndx < splits.length; ndx++) {
            String nodeName = splits[ndx];
            int delimLocation = nodeName.indexOf('@');
            String port = nodeName.substring(splits[ndx].indexOf(':', delimLocation) + 1, splits[ndx].indexOf('"', splits[ndx].indexOf(':')));
            splits[ndx] = nodeName.substring(delimLocation + 1, splits[ndx].indexOf(':', delimLocation));
            log.info("parseSeedNodes(): Adding node: {}:{}", splits[ndx], port);
            ClusterActor clusterActor = new ClusterActor();
            clusterActor.setNode(splits[ndx]);
            clusterActor.setAkkaPort(port);
            clusterActor.setMember("member-" + (ndx + 1));
            if(member.equals(clusterActor.getMember())) {
                self = clusterActor;
            }
            memberMap.put(clusterActor.getNode(), clusterActor);
            log.info("parseSeedNodes(): {}", clusterActor);
        }

        createHealthResolver();
    }
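    // Worked example (assumed hostnames) of the parsing above. Given a single akka.conf line:
    //   seed-nodes = ["akka.tcp://opendaylight-cluster-data@node1:2550", "akka.tcp://opendaylight-cluster-data@node2:2550"]
    // memberMap is keyed by "node1" and "node2", each ClusterActor holding akka port "2550"
    // and member names "member-1" and "member-2" respectively.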
 
    /**
     * Creates the specific health resolver requested by the user, as specified
     * in the gr-toolkit.properties file. If a resolver is not specified, or
     * there is an issue creating the resolver, it will use a fallback resolver
     * based on how many nodes are added to the memberMap HashMap.
     *
     * @see HealthResolver
     * @see SingleNodeHealthResolver
     * @see ThreeNodeHealthResolver
     * @see SixNodeHealthResolver
     */
    private void createHealthResolver() {
        log.info("createHealthResolver(): Creating health resolver...");
        try {
            Class resolverClass = null;
            String userDefinedResolver = properties.getProperty(PropertyKeys.RESOLVER);
            if(StringUtils.isEmpty(userDefinedResolver)) {
                throw new InstantiationException();
            }
            resolverClass = Class.forName(userDefinedResolver);
            Class[] types = { Map.class, properties.getClass(), DbLibService.class };
            Constructor<HealthResolver> constructor = resolverClass.getConstructor(types);
            Object[] parameters = { memberMap, properties, dbLib };
            resolver = constructor.newInstance(parameters);
            log.info("createHealthResolver(): Created resolver from name {}", resolver.toString());
        } catch(ClassNotFoundException | InstantiationException | InvocationTargetException | NoSuchMethodException | IllegalAccessException e) {
            log.warn("createHealthResolver(): Could not create user defined resolver", e);
            if(memberMap.size() == 1) {
                log.info("createHealthResolver(): FALLBACK: Initializing SingleNodeHealthResolver...");
                resolver = new SingleNodeHealthResolver(memberMap, properties, dbLib);
            } else if(memberMap.size() == 3) {
                log.info("createHealthResolver(): FALLBACK: Initializing ThreeNodeHealthResolver...");
                resolver = new ThreeNodeHealthResolver(memberMap, properties, dbLib);
            } else if(memberMap.size() == 6) {
                log.info("createHealthResolver(): FALLBACK: Initializing SixNodeHealthResolver...");
                resolver = new SixNodeHealthResolver(memberMap, properties, dbLib);
            }
        }
    }
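    // Configuration sketch: the PropertyKeys.RESOLVER property is expected to name a class with
    // a (Map, Properties, DbLibService) constructor, e.g. (assumed value)
    // org.onap.ccsdk.sli.plugins.grtoolkit.resolver.ThreeNodeHealthResolver. When it is unset or
    // construction fails, the fallback above picks a resolver based on the memberMap size.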
 
    /**
     * Adds or drops IPTables rules to block or resume akka traffic for a node
     * in the akka cluster. Assumes that the user or group that the controller
     * is run as has the ability to run sudo /sbin/iptables without requiring a
     * password. This method will run indefinitely if that assumption is not
     * correct. This method does not check to see if any rules around the node
     * are preexisting, so multiple uses will result in multiple additions and
     * removals from IPTables.
     *
     * @param task the operation to be performed against IPTables
     * @param nodeInfo array containing the nodes to be added or dropped from
     *        IPTables
     */
    private void modifyIpTables(IpTables task, Object[] nodeInfo) {
        log.info("modifyIpTables(): Modifying IPTables rules...");
        if(task == IpTables.ADD) {
            for(Object node : nodeInfo) {
                org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.halt.akka.traffic.input.NodeInfo n =
                        (org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.halt.akka.traffic.input.NodeInfo) node;
                log.info("modifyIpTables(): Isolating {}", n.getNode());
                executeCommand(String.format("sudo /sbin/iptables -A INPUT -p tcp --destination-port %s -j DROP -s %s", properties.get(PropertyKeys.CONTROLLER_PORT_AKKA), n.getNode()));
                executeCommand(String.format("sudo /sbin/iptables -A OUTPUT -p tcp --destination-port %s -j DROP -d %s", n.getPort(), n.getNode()));
            }
        } else if(task == IpTables.DELETE) {
            for(Object node : nodeInfo) {
                org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.resume.akka.traffic.input.NodeInfo n =
                        (org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.resume.akka.traffic.input.NodeInfo) node;
                log.info("modifyIpTables(): De-isolating {}", n.getNode());
                executeCommand(String.format("sudo /sbin/iptables -D INPUT -p tcp --destination-port %s -j DROP -s %s", properties.get(PropertyKeys.CONTROLLER_PORT_AKKA), n.getNode()));
                executeCommand(String.format("sudo /sbin/iptables -D OUTPUT -p tcp --destination-port %s -j DROP -d %s", n.getPort(), n.getNode()));
            }
        }
        if(nodeInfo.length > 0) {
            executeCommand("sudo /sbin/iptables -L");
        }
    }
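    // Example of the commands the ADD branch issues for one node, assuming both the configured
    // akka port and the node-info port are 2550 and the node is "node2":
    //   sudo /sbin/iptables -A INPUT -p tcp --destination-port 2550 -j DROP -s node2
    //   sudo /sbin/iptables -A OUTPUT -p tcp --destination-port 2550 -j DROP -d node2
    // The DELETE branch issues the same rules with -D to remove them.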
 
    /**
     * Opens a shell session and executes a command.
     *
     * @param command the shell command to execute
     */
    private void executeCommand(String command) {
        log.info("executeCommand(): Executing command: {}", command);
        String[] cmd = command.split(" ");
        try {
            Process p = Runtime.getRuntime().exec(cmd);
            BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(p.getInputStream()));
            String inputLine;
            StringBuilder content = new StringBuilder();
            while((inputLine = bufferedReader.readLine()) != null) {
                content.append(inputLine);
            }
            bufferedReader.close();
            log.info("executeCommand(): {}", content);
        } catch(IOException e) {
            log.error("executeCommand(): Error executing command", e);
        }
    }
 
    /**
     * The IPTables operations this module can perform.
     */
    private enum IpTables {
        ADD,
        DELETE
    }
}