<artifactId>mockito-core</artifactId>
             <scope>test</scope>
         </dependency>
+        <!-- WireMock used to create test web server -->
+        <!-- Using same dependency found in MSO Utils -->
+        <dependency>
+            <groupId>org.springframework.cloud</groupId>
+            <artifactId>spring-cloud-contract-wiremock</artifactId>
+            <version>1.2.4.RELEASE</version>
+            <scope>test</scope>
+        </dependency>
         <dependency>
             <groupId>org.json</groupId>
             <artifactId>json</artifactId>
 
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.net.HttpURLConnection;
-import java.net.URL;
-import java.nio.charset.StandardCharsets;
-import java.sql.Connection;
-import java.sql.SQLException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Properties;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
 import javax.annotation.Nonnull;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
 
+import org.apache.commons.lang.StringUtils;
+
 import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionManager;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionResponse;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
 import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
 import org.onap.ccsdk.sli.plugins.grtoolkit.data.MemberBuilder;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.PropertyKeys;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver;
+import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SingleNodeHealthResolver;
+import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SixNodeHealthResolver;
+import org.onap.ccsdk.sli.plugins.grtoolkit.resolver.ThreeNodeHealthResolver;
 
 import org.json.JSONArray;
-import org.json.JSONException;
 import org.json.JSONObject;
 
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+/**
+ * API implementation of the {@code GrToolkitService} interface generated from
+ * the gr-toolkit.yang model. The RPCs contained within this class are meant to
+ * run in an architecture-agnostic fashion, where the response is repeatable
+ * and predictable across any given node configuration. To facilitate this,
+ * health checking and failover logic has been abstracted into the
+ * {@code HealthResolver} classes.
+ * <p>
+ * Anyone who wishes to write a custom resolver for use with GR Toolkit should
+ * extend the {@code HealthResolver} class. The currently provided resolvers
+ * are useful references for further implementation.
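+ * <p>
+ * A minimal sketch of such a resolver (the class name is hypothetical, and it
+ * assumes {@code HealthResolver} exposes a constructor matching the one used
+ * for reflective instantiation in {@code createHealthResolver()}):
+ * <pre>{@code
+ * public class CustomHealthResolver extends HealthResolver {
+ *     public CustomHealthResolver(Map<String, ClusterActor> memberMap,
+ *                                 Properties properties,
+ *                                 DbLibService dbLibService) {
+ *         super(memberMap, properties, dbLibService);
+ *         // custom initialization goes here
+ *     }
+ * }
+ * }</pre>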
+ *
+ * @author Anthony Haddox
+ * @see GrToolkitService
+ * @see HealthResolver
+ * @see SingleNodeHealthResolver
+ * @see ThreeNodeHealthResolver
+ * @see SixNodeHealthResolver
+ */
 public class GrToolkitProvider implements AutoCloseable, GrToolkitService, DataTreeChangeListener {
     private static final String APP_NAME = "gr-toolkit";
     private static final String PROPERTIES_FILE = System.getenv("SDNC_CONFIG_DIR") + "/gr-toolkit.properties";
-    private static final String HEALTHY = "HEALTHY";
-    private static final String FAULTY = "FAULTY";
-    private static final String VALUE = "value";
-    private static final String OUTPUT = "output";
-    private static final int CONNECTION_TIMEOUT = 5000; // 5 second timeout
     private String akkaConfig;
-    private String jolokiaClusterPath;
-    private String shardManagerPath;
-    private String shardPathTemplate;
-    private String credentials;
     private String httpProtocol;
     private String siteIdentifier = System.getenv("SITE_NAME");
     private final Logger log = LoggerFactory.getLogger(GrToolkitProvider.class);
     private String member;
     private ClusterActor self;
     private HashMap<String, ClusterActor> memberMap;
-    private SiteConfiguration siteConfiguration;
     private Properties properties;
     private DistributedDataStoreInterface configDatastore;
+    private HealthResolver resolver;
+
+    /**
+     * Constructs the provider for the GR Toolkit API. Dependencies are
+     * injected using the GrToolkit.xml blueprint.
+     *
+     * @param dataBroker The Data Broker
+     * @param notificationProviderService The Notification Service
+     * @param rpcProviderRegistry The RPC Registry
+     * @param configDatastore The Configuration Data Store provided by the controller
+     * @param dbLibService Reference to the controller provided DbLibService
+     */
     public GrToolkitProvider(DataBroker dataBroker,
                              NotificationPublishService notificationProviderService,
                              RpcProviderRegistry rpcProviderRegistry,
                              DistributedDataStoreInterface configDatastore,
                              DbLibService dbLibService) {
-        this.log.info("Creating provider for {}", APP_NAME);
+        log.info("Creating provider for {}", APP_NAME);
         this.executor = Executors.newFixedThreadPool(1);
         this.dataBroker = dataBroker;
         this.notificationService = notificationProviderService;
         initialize();
     }
 
+    /**
+     * Initializes some structures necessary to hold health check information
+     * and perform failovers.
+     */
     private void initialize() {
         log.info("Initializing provider for {}", APP_NAME);
-        // Create the top level containers
         createContainers();
         setProperties();
         defineMembers();
-
         rpcRegistration = rpcRegistry.addRpcImplementation(GrToolkitService.class, this);
         log.info("Initialization complete for {}", APP_NAME);
     }
 
+    /**
+     * Creates the {@code Properties} object with the contents of
+     * gr-toolkit.properties, found in the directory specified by the
+     * {@code SDNC_CONFIG_DIR} environment variable. If the properties file
+     * is not found, GR Toolkit will not function.
+     */
     private void setProperties() {
         log.info("Loading properties from {}", PROPERTIES_FILE);
         properties = new Properties();
         File propertiesFile = new File(PROPERTIES_FILE);
         if(!propertiesFile.exists()) {
-            log.warn("Properties file not found.");
-            return;
-        }
-        try(FileInputStream fileInputStream = new FileInputStream(propertiesFile)) {
-            properties.load(fileInputStream);
-            if(!properties.containsKey(PropertyKeys.SITE_IDENTIFIER)) {
-                properties.put(PropertyKeys.SITE_IDENTIFIER, "Unknown Site");
-            }
-            String port = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? properties.getProperty(PropertyKeys.CONTROLLER_PORT_SSL).trim() : properties.getProperty(PropertyKeys.CONTROLLER_PORT_HTTP).trim();
-            httpProtocol = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? "https://" : "http://";
-            akkaConfig = properties.getProperty(PropertyKeys.AKKA_CONF_LOCATION).trim();
-            jolokiaClusterPath = ":" + port + properties.getProperty(PropertyKeys.MBEAN_CLUSTER).trim();
-            shardManagerPath = ":" + port + properties.getProperty(PropertyKeys.MBEAN_SHARD_MANAGER).trim();
-            shardPathTemplate = ":" + port + properties.getProperty(PropertyKeys.MBEAN_SHARD_CONFIG).trim();
-            if(siteIdentifier == null || siteIdentifier.isEmpty()) {
-                siteIdentifier = properties.getProperty(PropertyKeys.SITE_IDENTIFIER).trim();
+            log.warn("setProperties(): Properties file not found.");
+        } else {
+            try(FileInputStream fileInputStream = new FileInputStream(propertiesFile)) {
+                properties.load(fileInputStream);
+                if(!properties.containsKey(PropertyKeys.SITE_IDENTIFIER)) {
+                    properties.put(PropertyKeys.SITE_IDENTIFIER, "Unknown Site");
+                }
+                httpProtocol = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? "https://" : "http://";
+                akkaConfig = properties.getProperty(PropertyKeys.AKKA_CONF_LOCATION).trim();
+                if(StringUtils.isEmpty(siteIdentifier)) {
+                    siteIdentifier = properties.getProperty(PropertyKeys.SITE_IDENTIFIER).trim();
+                }
+                log.info("setProperties(): Loaded properties.");
+            } catch(IOException e) {
+                log.error("setProperties(): Error loading properties.", e);
             }
-            credentials = properties.getProperty(PropertyKeys.CONTROLLER_CREDENTIALS).trim();
-            log.info("Loaded properties.");
-        } catch(IOException e) {
-            log.error("Error loading properties.", e);
         }
     }
 
+    /**
+     * Parses the akka.conf file used by the controller to define an akka
+     * cluster. This method requires the <i>seed-nodes</i> definition to exist
+     * on a single line.
+     */
     private void defineMembers() {
         member = configDatastore.getActorContext().getCurrentMemberName().getName();
-        log.info("Cluster member: {}", member);
+        log.info("defineMembers(): Cluster member: {}", member);
 
-        log.info("Parsing akka.conf for cluster memberMap...");
+        log.info("defineMembers(): Parsing akka.conf for cluster memberMap...");
         try {
             File akkaConfigFile = new File(this.akkaConfig);
             try(FileReader fileReader = new FileReader(akkaConfigFile);
                 }
             }
         } catch(IOException e) {
-            log.error("Couldn't load akka", e);
+            log.error("defineMembers(): Couldn't load akka", e);
         } catch(NullPointerException e) {
-            log.error("akkaConfig is null. Check properties file and restart {} bundle.", APP_NAME);
+            log.error("defineMembers(): akkaConfig is null. Check properties file and restart {} bundle.", APP_NAME);
+            log.error("defineMembers(): NullPointerException", e);
         }
         log.info("self:\n{}", self);
     }
 
+    /**
+     * Sets up the {@code InstanceIdentifier}s for Data Store transactions.
+     */
     private void createContainers() {
         // Replace with MD-SAL write for FailoverStatus
     }
 
-    protected void initializeChild() {
-        // Override if you have custom initialization intelligence
-    }
-
+    /**
+     * Shuts down the {@code ExecutorService} and closes the RPC Provider Registry.
+     */
     @Override
     public void close() throws Exception {
         log.info("Closing provider for {}", APP_NAME);
         executor.shutdown();
         rpcRegistration.close();
-        log.info("Successfully closed provider for {}", APP_NAME);
+        log.info("close(): Successfully closed provider for {}", APP_NAME);
     }
 
+    /**
+     * Listens for changes to the Data tree.
+     *
+     * @param changes Data tree changes.
+     */
     @Override
     public void onDataTreeChanged(@Nonnull Collection changes) {
-        log.info("onDataTreeChanged() called. but there is no change here");
-    }
-
+        log.info("onDataTreeChanged(): No changes.");
+    }
+
+    /**
+     * Makes a call to {@code resolver.getClusterHealth()} to determine the
+     * health of the akka clustered controllers.
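+     * <p>
+     * As an illustration, the RPC can be invoked over RESTCONF using the same
+     * path convention used elsewhere in this class (host and port are
+     * placeholders for the controller endpoint); the other health RPCs below
+     * follow the same pattern:
+     * <pre>
+     * POST http://{host}:{port}/restconf/operations/gr-toolkit:cluster-health
+     * </pre>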
+     *
+     * @param input request body adhering to the model for
+     *        {@code ClusterHealthInput}
+     * @return response adhering to the model for {@code ClusterHealthOutput}
+     * @see HealthResolver
+     * @see ClusterHealthInput
+     * @see ClusterHealthOutput
+     */
     @Override
     public ListenableFuture<RpcResult<ClusterHealthOutput>> clusterHealth(ClusterHealthInput input) {
         log.info("{}:cluster-health invoked.", APP_NAME);
-        getControllerHealth();
-        return buildClusterHealthOutput("200");
-    }
-
+        resolver.getClusterHealth();
+        return buildClusterHealthOutput();
+    }
+
+    /**
+     * Makes a call to {@code resolver.getSiteHealth()} to determine the health
+     * of all of the application components of a site. In a multi-site config,
+     * this will gather the health of all sites.
+     *
+     * @param input request body adhering to the model for
+     *        {@code SiteHealthInput}
+     * @return response adhering to the model for {@code SiteHealthOutput}
+     * @see HealthResolver
+     * @see SiteHealthInput
+     * @see SiteHealthOutput
+     */
     @Override
     public ListenableFuture<RpcResult<SiteHealthOutput>> siteHealth(SiteHealthInput input) {
         log.info("{}:site-health invoked.", APP_NAME);
-        getControllerHealth();
-        return buildSiteHealthOutput("200", getAdminHealth(), getDatabaseHealth());
-    }
-
+        List<SiteHealth> sites = resolver.getSiteHealth();
+        return buildSiteHealthOutput(sites);
+    }
+
+    /**
+     * Makes a call to {@code resolver.getDatabaseHealth()} to determine the
+     * health of the database(s) used by the controller.
+     *
+     * @param input request body adhering to the model for
+     *        {@code DatabaseHealthInput}
+     * @return response adhering to the model for {@code DatabaseHealthOutput}
+     * @see HealthResolver
+     * @see DatabaseHealthInput
+     * @see DatabaseHealthOutput
+     */
     @Override
     public ListenableFuture<RpcResult<DatabaseHealthOutput>> databaseHealth(DatabaseHealthInput input) {
         log.info("{}:database-health invoked.", APP_NAME);
         DatabaseHealthOutputBuilder outputBuilder = new DatabaseHealthOutputBuilder();
-        outputBuilder.setStatus("200");
-        outputBuilder.setHealth(getDatabaseHealth());
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        outputBuilder.setStatus(health.getHealth().equals(Health.HEALTHY) ? "200" : "500");
+        outputBuilder.setHealth(health.getHealth().toString());
         outputBuilder.setServedBy(member);
-
+        log.info("databaseHealth(): Health: {}", health.getHealth());
         return Futures.immediateFuture(RpcResultBuilder.<DatabaseHealthOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Makes a call to {@code resolver.getAdminHealth()} to determine the
+     * health of the administrative portal(s) used by the controller.
+     *
+     * @param input request body adhering to the model for
+     *        {@code AdminHealthInput}
+     * @return response adhering to the model for {@code AdminHealthOutput}
+     * @see HealthResolver
+     * @see AdminHealthInput
+     * @see AdminHealthOutput
+     */
     @Override
     public ListenableFuture<RpcResult<AdminHealthOutput>> adminHealth(AdminHealthInput input) {
         log.info("{}:admin-health invoked.", APP_NAME);
         AdminHealthOutputBuilder outputBuilder = new AdminHealthOutputBuilder();
-        outputBuilder.setStatus("200");
-        outputBuilder.setHealth(getAdminHealth());
+        AdminHealth adminHealth = resolver.getAdminHealth();
+        outputBuilder.setStatus(Integer.toString(adminHealth.getStatusCode()));
+        outputBuilder.setHealth(adminHealth.getHealth().toString());
         outputBuilder.setServedBy(member);
-        log.info(outputBuilder.build().toString());
+        log.info("adminHealth(): Status: {} | Health: {}", adminHealth.getStatusCode(), adminHealth.getHealth());
         return Futures.immediateFuture(RpcResultBuilder.<AdminHealthOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Puts IP Tables rules in place to drop akka communications traffic with
+     * one or more nodes. This method does not perform any checks to see if
+     * rules currently exist, and assumes success.
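+     * <p>
+     * A sketch of the request body, mirroring the JSON assembled by
+     * {@code isolateSiteFromCluster()} (values are placeholders; the leaf
+     * names follow the generated {@code NodeInfo} getters):
+     * <pre>
+     * { "input": { "node-info": [ { "node": "node1.example.org", "port": "2550" } ] } }
+     * </pre>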
+     *
+     * @param input request body adhering to the model for
+     *        {@code HaltAkkaTrafficInput}
+     * @return response adhering to the model for {@code HaltAkkaTrafficOutput}
+     * @see HaltAkkaTrafficInput
+     * @see HaltAkkaTrafficOutput
+     */
     @Override
     public ListenableFuture<RpcResult<HaltAkkaTrafficOutput>> haltAkkaTraffic(HaltAkkaTrafficInput input) {
         log.info("{}:halt-akka-traffic invoked.", APP_NAME);
         return Futures.immediateFuture(RpcResultBuilder.<HaltAkkaTrafficOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Removes the IP Tables rules put in place to block akka communications
+     * traffic, permitting traffic with one or more nodes again. This method
+     * does not perform any checks to see if rules currently exist, and
+     * assumes success.
+     *
+     * @param input request body adhering to the model for
+     *        {@code ResumeAkkaTrafficInput}
+     * @return response adhering to the model for {@code ResumeAkkaTrafficOutput}
+     * @see ResumeAkkaTrafficInput
+     * @see ResumeAkkaTrafficOutput
+     */
     @Override
     public ListenableFuture<RpcResult<ResumeAkkaTrafficOutput>> resumeAkkaTraffic(ResumeAkkaTrafficInput input) {
         log.info("{}:resume-akka-traffic invoked.", APP_NAME);
         return Futures.immediateFuture(RpcResultBuilder.<ResumeAkkaTrafficOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Returns a canned response containing the identifier for this
+     * controller's site.
+     *
+     * @param input request body adhering to the model for
+     *        {@code SiteIdentifierInput}
+     * @return response adhering to the model for {@code SiteIdentifierOutput}
+     * @see SiteIdentifierInput
+     * @see SiteIdentifierOutput
+     */
     @Override
     public ListenableFuture<RpcResult<SiteIdentifierOutput>> siteIdentifier(SiteIdentifierInput input) {
         log.info("{}:site-identifier invoked.", APP_NAME);
         outputBuilder.setStatus("200");
         outputBuilder.setId(siteIdentifier);
         outputBuilder.setServedBy(member);
-
         return Futures.immediateFuture(RpcResultBuilder.<SiteIdentifierOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Makes a call to {@code resolver.tryFailover()} to try a failover defined
+     * by the active {@code HealthResolver}.
+     *
+     * @param input request body adhering to the model for
+     *        {@code FailoverInput}
+     * @return response adhering to the model for {@code FailoverOutput}
+     * @see HealthResolver
+     * @see FailoverInput
+     * @see FailoverOutput
+     */
     @Override
     public ListenableFuture<RpcResult<FailoverOutput>> failover(FailoverInput input) {
         log.info("{}:failover invoked.", APP_NAME);
         FailoverOutputBuilder outputBuilder = new FailoverOutputBuilder();
+        FailoverStatus failoverStatus = resolver.tryFailover(input);
         outputBuilder.setServedBy(member);
-        if(siteConfiguration != SiteConfiguration.GEO) {
-            log.info("Cannot failover non-GEO site.");
-            outputBuilder.setMessage("Failover aborted. This is not a GEO configuration.");
-            outputBuilder.setStatus("400");
-            return Futures.immediateFuture(RpcResultBuilder.<FailoverOutput>status(true).withResult(outputBuilder.build()).build());
-        }
-        ArrayList<ClusterActor> activeSite = new ArrayList<>();
-        ArrayList<ClusterActor> standbySite = new ArrayList<>();
-
-        log.info("Performing preliminary cluster health check...");
-        // Necessary to populate all member info. Health is not used for judgement calls.
-        getControllerHealth();
-
-        log.info("Determining active site...");
-        for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-            String key = entry.getKey();
-            ClusterActor clusterActor = entry.getValue();
-            if(clusterActor.isVoting()) {
-                activeSite.add(clusterActor);
-                log.debug("Active Site member: {}", key);
-            }
-            else {
-                standbySite.add(clusterActor);
-                log.debug("Standby Site member: {}", key);
-            }
-        }
-
-        String port = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL)) ? properties.getProperty(PropertyKeys.CONTROLLER_PORT_SSL) : properties.getProperty(PropertyKeys.CONTROLLER_PORT_HTTP);
-
-        if(Boolean.parseBoolean(input.getBackupData())) {
-            backupMdSal(activeSite, port);
-        }
-
-        if(!changeClusterVoting(outputBuilder, activeSite, standbySite, port))
-            return Futures.immediateFuture(RpcResultBuilder.<FailoverOutput>status(true).withResult(outputBuilder.build()).build());
-
-        if(Boolean.parseBoolean(input.getIsolate())) {
-            isolateSiteFromCluster(activeSite, standbySite, port);
-
-            if(Boolean.parseBoolean(input.getDownUnreachable())) {
-                downUnreachableNodes(activeSite, standbySite, port);
-            }
-        }
-
-        log.info("{}:failover complete.", APP_NAME);
-
-        outputBuilder.setMessage("Failover complete.");
-        outputBuilder.setStatus("200");
+        outputBuilder.setMessage(failoverStatus.getMessage());
+        outputBuilder.setStatus(Integer.toString(failoverStatus.getStatusCode()));
+        log.info("{}:{}.", APP_NAME, failoverStatus.getMessage());
         return Futures.immediateFuture(RpcResultBuilder.<FailoverOutput>status(true).withResult(outputBuilder.build()).build());
     }
 
+    /**
+     * Performs an akka traffic isolation of the active site from the standby
+     * site in an Active/Standby architecture. Invokes the
+     * {@code halt-akka-traffic} RPC against the standby site nodes using the
+     * information of the active site nodes.
+     *
+     * @param activeSite list of nodes in the active site
+     * @param standbySite list of nodes in the standby site
+     * @param port http or https port of the controller
+     * @deprecated No longer used since the refactor to use the HealthResolver
+     *             pattern. Retained so the logic can be replicated later.
+     */
     private void isolateSiteFromCluster(ArrayList<ClusterActor> activeSite, ArrayList<ClusterActor> standbySite, String port) {
-        log.info("Halting Akka traffic...");
+        log.info("isolateSiteFromCluster(): Halting Akka traffic...");
         for(ClusterActor actor : standbySite) {
             try {
                 log.info("Halting Akka traffic for: {}", actor.getNode());
-                // Build JSON with activeSite actor Node and actor  AkkaPort
+                // Build JSON with activeSite actor Node and actor AkkaPort
                 JSONObject akkaInput = new JSONObject();
                 JSONObject inputBlock = new JSONObject();
                 JSONArray votingStateArray = new JSONArray();
                 }
                 inputBlock.put("node-info", votingStateArray);
                 akkaInput.put("input", inputBlock);
-                getRequestContent(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:halt-akka-traffic", HttpMethod.POST, akkaInput.toString());
+                ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:halt-akka-traffic", ConnectionManager.HttpMethod.POST, akkaInput.toString(), "");
             } catch(IOException e) {
-                log.error("Could not halt Akka traffic for: " + actor.getNode(), e);
+                log.error("isolateSiteFromCluster(): Could not halt Akka traffic for: " + actor.getNode(), e);
             }
         }
     }
 
+    /**
+     * Invokes the down unreachable action through the Jolokia mbean API.
+     *
+     * @param activeSite list of nodes in the active site
+     * @param standbySite list of nodes in the standby site
+     * @param port http or https port of the controller
+     * @deprecated No longer used since the refactor to use the HealthResolver
+     *             pattern. Retained so the logic can be replicated later.
+     */
     private void downUnreachableNodes(ArrayList<ClusterActor> activeSite, ArrayList<ClusterActor> standbySite, String port) {
-        log.info("Setting site unreachable...");
+        log.info("downUnreachableNodes(): Setting site unreachable...");
         JSONObject jolokiaInput = new JSONObject();
         jolokiaInput.put("type", "EXEC");
         jolokiaInput.put("mbean", "akka:type=Cluster");
             arguments.put("akka.tcp://opendaylight-cluster-data@" + actor.getNode() + ":" + properties.getProperty(PropertyKeys.CONTROLLER_PORT_AKKA));
         }
         jolokiaInput.put("arguments", arguments);
-        log.debug("{}", jolokiaInput);
+        log.debug("downUnreachableNodes(): {}", jolokiaInput);
         try {
-            log.info("Setting nodes unreachable");
-            getRequestContent(httpProtocol + standbySite.get(0).getNode() + ":" + port + "/jolokia", HttpMethod.POST, jolokiaInput.toString());
+            log.info("downUnreachableNodes(): Setting nodes unreachable");
+            ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + standbySite.get(0).getNode() + ":" + port + "/jolokia", ConnectionManager.HttpMethod.POST, jolokiaInput.toString(), "");
         } catch(IOException e) {
-            log.error("Error setting nodes unreachable", e);
-        }
-    }
-
-    private boolean changeClusterVoting(FailoverOutputBuilder outputBuilder, ArrayList<ClusterActor> activeSite, ArrayList<ClusterActor> standbySite, String port) {
-        log.info("Changing voting for all shards to standby site...");
-        try {
-            JSONObject votingInput = new JSONObject();
-            JSONObject inputBlock = new JSONObject();
-            JSONArray votingStateArray = new JSONArray();
-            JSONObject memberVotingState;
-            for(ClusterActor actor : activeSite) {
-                memberVotingState = new JSONObject();
-                memberVotingState.put("member-name", actor.getMember());
-                memberVotingState.put("voting", false);
-                votingStateArray.put(memberVotingState);
-            }
-            for(ClusterActor actor : standbySite) {
-                memberVotingState = new JSONObject();
-                memberVotingState.put("member-name", actor.getMember());
-                memberVotingState.put("voting", true);
-                votingStateArray.put(memberVotingState);
-            }
-            inputBlock.put("member-voting-state", votingStateArray);
-            votingInput.put("input", inputBlock);
-            log.debug("{}", votingInput);
-            // Change voting all shards
-            getRequestContent(httpProtocol + self.getNode() + ":" + port + "/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards", HttpMethod.POST, votingInput.toString());
-        } catch(IOException e) {
-            log.error("Changing voting", e);
-            outputBuilder.setMessage("Failover aborted. Failed to change voting.");
-            outputBuilder.setStatus("500");
-            return false;
-        }
-        return true;
-    }
-
+            log.error("downUnreachableNodes(): Error setting nodes unreachable", e);
+        }
+    }
+
+    /**
+     * Triggers a data backup and export sequence of MD-SAL data. Invokes the
+     * {@code data-export-import:schedule-export} RPC to schedule a data export
+     * and subsequently the {@code daexim-offsite-backup:backup-data} RPC
+     * against the active site to export and backup the data. Assumes the
+     * controllers have the org.onap.ccsdk.sli.northbound.daeximoffsitebackup
+     * bundle installed.
+     *
+     * @param activeSite list of nodes in the active site
+     * @param port http or https port of the controller
+     * @deprecated No longer used since the refactor to use the HealthResolver
+     *             pattern. Retained so the logic can be replicated later.
+     */
     private void backupMdSal(ArrayList<ClusterActor> activeSite, String port) {
-        log.info("Backing up data...");
+        log.info("backupMdSal(): Backing up data...");
         try {
-            log.info("Scheduling backup for: {}", activeSite.get(0).getNode());
-            getRequestContent(httpProtocol + activeSite.get(0).getNode() + ":" + port + "/restconf/operations/data-export-import:schedule-export", HttpMethod.POST, "{ \"input\": { \"run-at\": \"30\" } }");
+            log.info("backupMdSal(): Scheduling backup for: {}", activeSite.get(0).getNode());
+            ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + activeSite.get(0).getNode() + ":" + port + "/restconf/operations/data-export-import:schedule-export", ConnectionManager.HttpMethod.POST, "{ \"input\": { \"run-at\": \"30\" } }", "");
         } catch(IOException e) {
-            log.error("Error backing up MD-SAL", e);
+            log.error("backupMdSal(): Error backing up MD-SAL", e);
         }
         for(ClusterActor actor : activeSite) {
             try {
                 // Move data offsite
-                log.info("Backing up data for: {}", actor.getNode());
-                getRequestContent(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/daexim-offsite-backup:backup-data", HttpMethod.POST);
+                log.info("backupMdSal(): Backing up data for: {}", actor.getNode());
+                ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + actor.getNode() + ":" + port + "/restconf/operations/daexim-offsite-backup:backup-data", ConnectionManager.HttpMethod.POST, null, "");
             } catch(IOException e) {
-                log.error("Error backing up data.", e);
+                log.error("backupMdSal(): Error backing up data.", e);
             }
         }
     }
 
-    private ListenableFuture<RpcResult<ClusterHealthOutput>> buildClusterHealthOutput(String statusCode) {
+    /**
+     * Builds a response object for {@code clusterHealth()}. Sorts and iterates
+     * over the contents of the {@code memberMap}, which contains the health
+     * information of the cluster, and adds them to the {@code outputBuilder}.
+     * If the ClusterActor is healthy, according to
+     * {@code resolver.isControllerHealthy()}, the {@code ClusterHealthOutput}
+     * status has a {@code 0} appended, otherwise a {@code 1} is appended. A
+     * status of all zeroes denotes a healthy cluster. This status should be
+     * easily decoded by tools which use the output.
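+     * For example, in a three node cluster where only member-2 is unhealthy,
+     * the resulting status would be {@code "010"}.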
+     *
+     * @return future containing a completed {@code ClusterHealthOutput}
+     * @see ClusterActor
+     * @see ClusterHealthOutput
+     * @see HealthResolver
+     */
+    @SuppressWarnings("unchecked")
+    private ListenableFuture<RpcResult<ClusterHealthOutput>> buildClusterHealthOutput() {
         ClusterHealthOutputBuilder outputBuilder = new ClusterHealthOutputBuilder();
-        outputBuilder.setStatus(statusCode);
-        outputBuilder.setMembers((List) new ArrayList<Member>());
-        int site1Health = 0;
-        int site2Health = 0;
-
-        for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-            ClusterActor clusterActor = entry.getValue();
-            if(clusterActor.isUp() && !clusterActor.isUnreachable()) {
-                if(ClusterActor.SITE_1.equals(clusterActor.getSite()))
-                    site1Health++;
-                else if(ClusterActor.SITE_2.equals(clusterActor.getSite()))
-                    site2Health++;
-            }
-            outputBuilder.getMembers().add(new MemberBuilder(clusterActor).build());
-        }
-        if(siteConfiguration == SiteConfiguration.SOLO) {
-            outputBuilder.setSite1Health(HEALTHY);
-        }
-        else if(site1Health > 1) {
-            outputBuilder.setSite1Health(HEALTHY);
-        }
-        else {
-            outputBuilder.setSite1Health(FAULTY);
-        }
-
-        if(siteConfiguration == SiteConfiguration.GEO && site2Health > 1) {
-            outputBuilder.setSite2Health(HEALTHY);
-        }
-        else if(siteConfiguration == SiteConfiguration.GEO) {
-            outputBuilder.setSite2Health(FAULTY);
-        }
-
         outputBuilder.setServedBy(member);
+        List memberList = new ArrayList<Member>();
+        StringBuilder stat = new StringBuilder();
+        memberMap.values()
+                .stream()
+                .sorted(Comparator.comparingInt(member -> Integer.parseInt(member.getMember().split("-")[1])))
+                .forEach(member -> {
+                    memberList.add(new MemberBuilder(member).build());
+                    // 0 is a healthy controller, 1 is unhealthy.
+                    // The list is sorted so users can decode to find unhealthy nodes
+                    // This will also let them figure out health on a per-site basis
+                    // Depending on any tools they use with this API
+                    if(resolver.isControllerHealthy(member)) {
+                        stat.append("0");
+                    } else {
+                        stat.append("1");
+                    }
+                });
+        outputBuilder.setStatus(stat.toString());
+        outputBuilder.setMembers(memberList);
         RpcResult<ClusterHealthOutput> rpcResult = RpcResultBuilder.<ClusterHealthOutput>status(true).withResult(outputBuilder.build()).build();
-        log.info("{}:cluster-health: Site 1 | Healthy ODLs {}", APP_NAME, site1Health);
-        if(siteConfiguration == SiteConfiguration.GEO) {
-            log.info("{}:cluster-health: Site 2 | Healthy ODLs {}", APP_NAME, site2Health);
-        }
         return Futures.immediateFuture(rpcResult);
     }
 
-    private ListenableFuture<RpcResult<SiteHealthOutput>> buildSiteHealthOutput(String statusCode, String adminHealth, String databaseHealth) {
+    /**
+     * Builds a response object for {@code siteHealth()}. Iterates over a list
+     * of {@code SiteHealth} objects and populates the {@code SiteHealthOutput}
+     * with the information.
+     *
+     * @param sites list of sites
+     * @return future containing a completed {@code SiteHealthOutput}
+     * @see SiteHealth
+     * @see HealthResolver
+     */
+    @SuppressWarnings("unchecked")
+    private ListenableFuture<RpcResult<SiteHealthOutput>> buildSiteHealthOutput(List<SiteHealth> sites) {
         SiteHealthOutputBuilder outputBuilder = new SiteHealthOutputBuilder();
-        outputBuilder.setStatus(statusCode);
+        SitesBuilder siteBuilder = new SitesBuilder();
+        outputBuilder.setStatus("200");
         outputBuilder.setSites((List) new ArrayList<Site>());
 
-        if(siteConfiguration != SiteConfiguration.GEO) {
-            int healthyODLs = 0;
-            SitesBuilder builder = new SitesBuilder();
-            for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-                ClusterActor clusterActor = entry.getValue();
-                if(clusterActor.isUp() && !clusterActor.isUnreachable()) {
-                    healthyODLs++;
-                }
-            }
-            if(siteConfiguration != SiteConfiguration.SOLO) {
-                builder.setHealth(HEALTHY);
-                builder.setRole("ACTIVE");
-                builder.setId(siteIdentifier);
-            }
-            else {
-                builder = getSitesBuilder(healthyODLs, true, HEALTHY.equals(adminHealth), HEALTHY.equals(databaseHealth), siteIdentifier);
-            }
-            outputBuilder.getSites().add(builder.build());
-        }
-        else {
-            int site1HealthyODLs = 0;
-            int site2HealthyODLs = 0;
-            boolean site1Voting = false;
-            boolean site2Voting = false;
-            boolean performedCrossSiteHealthCheck = false;
-            boolean crossSiteAdminHealthy = false;
-            boolean crossSiteDbHealthy = false;
-            String crossSiteIdentifier = "UNKNOWN_SITE";
-            String port = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL)) ? properties.getProperty(PropertyKeys.CONTROLLER_PORT_SSL) : properties.getProperty(PropertyKeys.CONTROLLER_PORT_HTTP);
-            if(isSite1()) {
-                // Make calls over to site 2 healthchecks
-                for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-                    ClusterActor clusterActor = entry.getValue();
-                    if(clusterActor.isUp() && !clusterActor.isUnreachable()) {
-                        if(ClusterActor.SITE_1.equals(clusterActor.getSite())) {
-                            site1HealthyODLs++;
-                            if(clusterActor.isVoting()) {
-                                site1Voting = true;
-                            }
-                        }
-                        else {
-                            site2HealthyODLs++;
-                            if(clusterActor.isVoting()) {
-                                site2Voting = true;
-                            }
-                            if(!performedCrossSiteHealthCheck) {
-                                try {
-                                    String content = getRequestContent(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:site-identifier", HttpMethod.POST);
-                                    crossSiteIdentifier = new JSONObject(content).getJSONObject(OUTPUT).getString("id");
-                                    crossSiteDbHealthy = crossSiteHealthRequest(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:database-health");
-                                    crossSiteAdminHealthy = crossSiteHealthRequest(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:admin-health");
-                                    performedCrossSiteHealthCheck = true;
-                                } catch(Exception e) {
-                                    log.info("Cannot get cross site health from {}", clusterActor.getNode());
-                                    log.info("siteIdentifier: {} | dbHealth: {} | adminHealth: {}", crossSiteIdentifier, crossSiteDbHealthy, crossSiteAdminHealthy);
-                                    log.error("Site Health Error", e);
-                                }
-                            }
-                        }
-                    }
-                }
-                SitesBuilder builder = getSitesBuilder(site1HealthyODLs, site1Voting, HEALTHY.equals(adminHealth), HEALTHY.equals(databaseHealth), siteIdentifier);
-                outputBuilder.getSites().add(builder.build());
-                builder = getSitesBuilder(site2HealthyODLs, site2Voting, crossSiteAdminHealthy, crossSiteDbHealthy, crossSiteIdentifier);
-                outputBuilder.getSites().add(builder.build());
-                log.info("{}:site-health: Site 1 ({}) | hasVotingMembers?: {} | Healthy ODLs: {} | ADM isHealthy?: {} | DB isHealthy?: {}", APP_NAME, siteIdentifier, site1Voting, site1HealthyODLs, HEALTHY.equals(adminHealth), HEALTHY.equals(databaseHealth));
-                log.info("{}:site-health: Site 2 ({}) | hasVotingMembers?: {} | Healthy ODLs: {} | ADM isHealthy?: {} | DB isHealthy?: {}", APP_NAME, crossSiteIdentifier, site2Voting, site2HealthyODLs, crossSiteAdminHealthy, crossSiteDbHealthy);
-            }
-            else {
-                // Make calls over to site 1 healthchecks
-                for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-                    ClusterActor clusterActor = entry.getValue();
-                    if(clusterActor.isUp() && !clusterActor.isUnreachable()) {
-                        if(ClusterActor.SITE_1.equals(clusterActor.getSite())) {
-                            site1HealthyODLs++;
-                            if(clusterActor.isVoting()) {
-                                site1Voting = true;
-                            }
-                            if(!performedCrossSiteHealthCheck) {
-                                try {
-                                    String content = getRequestContent(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:site-identifier", HttpMethod.POST);
-                                    crossSiteIdentifier = new JSONObject(content).getJSONObject(OUTPUT).getString("id");
-                                    crossSiteDbHealthy = crossSiteHealthRequest(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:database-health");
-                                    crossSiteAdminHealthy = crossSiteHealthRequest(httpProtocol + clusterActor.getNode() + ":" + port + "/restconf/operations/gr-toolkit:admin-health");
-                                    performedCrossSiteHealthCheck = true;
-                                } catch(Exception e) {
-                                    log.info("Cannot get cross site health from {}", clusterActor.getNode());
-                                    log.info("siteIdentifier: {} | dbHealth: {} | adminHealth: {}", crossSiteIdentifier, crossSiteDbHealthy, crossSiteAdminHealthy);
-                                    log.error("Site Health Error", e);
-                                }
-                            }
-                        }
-                        else {
-                            site2HealthyODLs++;
-                            if(clusterActor.isVoting()) {
-                                site2Voting = true;
-                            }
-                        }
-                    }
-                }
-                // Build Output
-                SitesBuilder builder = getSitesBuilder(site1HealthyODLs, site1Voting, crossSiteAdminHealthy, crossSiteDbHealthy, crossSiteIdentifier);
-                outputBuilder.getSites().add(builder.build());
-                builder = getSitesBuilder(site2HealthyODLs, site2Voting, HEALTHY.equals(adminHealth), HEALTHY.equals(databaseHealth), siteIdentifier);
-                outputBuilder.getSites().add(builder.build());
-                log.info("{}:site-health: Site 1 ({}) | hasVotingMembers?: {} | Healthy ODLs: {} | ADM isHealthy?: {} | DB isHealthy?: {}", APP_NAME, siteIdentifier, site1Voting, site1HealthyODLs, HEALTHY.equals(adminHealth), HEALTHY.equals(databaseHealth));
-                log.info("{}:site-health: Site 2 ({}) | hasVotingMembers?: {} | Healthy ODLs: {} | ADM isHealthy?: {} | DB isHealthy?: {}", APP_NAME, crossSiteIdentifier, site2Voting, site2HealthyODLs, crossSiteAdminHealthy, crossSiteDbHealthy);
-            }
+        for(SiteHealth site : sites) {
+            siteBuilder.setHealth(site.getHealth().toString());
+            siteBuilder.setRole(site.getRole());
+            siteBuilder.setId(site.getId());
+            outputBuilder.getSites().add(siteBuilder.build());
+            log.info("buildSiteHealthOutput(): Health for {}: {}", site.getId(), site.getHealth().getHealth());
         }
 
         outputBuilder.setServedBy(member);
         return Futures.immediateFuture(rpcResult);
     }
 
-    private SitesBuilder getSitesBuilder(int siteHealthyODLs, boolean siteVoting, boolean adminHealthy, boolean dbHealthy, String siteIdentifier) {
-        SitesBuilder builder = new SitesBuilder();
-        if(siteHealthyODLs > 1) {
-            builder.setHealth(HEALTHY);
-        }
-        else {
-            log.warn("{} Healthy ODLs: {}", siteIdentifier, siteHealthyODLs);
-            builder.setHealth(FAULTY);
-        }
-        if(!adminHealthy) {
-            log.warn("{} Admin Health: {}", siteIdentifier, FAULTY);
-            builder.setHealth(FAULTY);
-        }
-        if(!dbHealthy) {
-            log.warn("{} Database Health: {}", siteIdentifier, FAULTY);
-            builder.setHealth(FAULTY);
-        }
-        if(siteVoting) {
-            builder.setRole("ACTIVE");
-        }
-        else {
-            builder.setRole("STANDBY");
-        }
-        builder.setId(siteIdentifier);
-        return builder;
-    }
-
-    private boolean isSite1() {
-        int memberNumber = Integer.parseInt(member.split("-")[1]);
-        boolean isSite1 = memberNumber < 4;
-        log.info("isSite1(): {}", isSite1);
-        return isSite1;
-    }
-
+    /**
+     * Parses a line containing the akka networking information of the akka
+     * controller cluster. Assumes entries of the format:
+     * <p>
+     * {@code akka.tcp://opendaylight-cluster-data@<FQDN>:<AKKA_PORT>}
+     * <p>
+     * The information is stored in a {@code ClusterActor} object, and then
+     * added to the memberMap HashMap, with the {@code FQDN} as the key. The
+     * final step is a call to {@code createHealthResolver} to create the
+     * health resolver for the provider.
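+     * <p>
+     * For example (hostname illustrative), the seed-nodes line in akka.conf
+     * might look like:
+     * <pre>
+     * seed-nodes = ["akka.tcp://opendaylight-cluster-data@node1.example.org:2550"]
+     * </pre>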
+     *
+     * @param line the line containing all of the seed nodes
+     * @see ClusterActor
+     * @see HealthResolver
+     */
     private void parseSeedNodes(String line) {
         memberMap = new HashMap<>();
         line = line.substring(line.indexOf("[\""), line.indexOf(']'));
             int delimLocation = nodeName.indexOf('@');
             String port = nodeName.substring(splits[ndx].indexOf(':', delimLocation) + 1, splits[ndx].indexOf('"', splits[ndx].indexOf(':')));
             splits[ndx] = nodeName.substring(delimLocation + 1, splits[ndx].indexOf(':', delimLocation));
-            log.info("Adding node: {}:{}", splits[ndx], port);
+            log.info("parseSeedNodes(): Adding node: {}:{}", splits[ndx], port);
             ClusterActor clusterActor = new ClusterActor();
             clusterActor.setNode(splits[ndx]);
             clusterActor.setAkkaPort(port);
             clusterActor.setMember("member-" + (ndx + 1));
-            if(ndx < 3) {
-                clusterActor.setSite(ClusterActor.SITE_1);
-            }
-            else {
-                clusterActor.setSite(ClusterActor.SITE_2);
-            }
-
             if(member.equals(clusterActor.getMember())) {
                 self = clusterActor;
             }
             memberMap.put(clusterActor.getNode(), clusterActor);
-            log.info("{}", clusterActor);
+            log.info("parseSeedNodes(): {}", clusterActor);
         }
 
-        if(memberMap.size() == 1) {
-            log.info("1 member found. This is a solo environment.");
-            siteConfiguration = SiteConfiguration.SOLO;
-        }
-        else if(memberMap.size() == 3) {
-            log.info("This is a single site.");
-            siteConfiguration = SiteConfiguration.SINGLE;
-        }
-        else if(memberMap.size() == 6) {
-            log.info("This is a georedundant site.");
-            siteConfiguration = SiteConfiguration.GEO;
-        }
+        createHealthResolver();
     }
 
-    private void getMemberStatus(ClusterActor clusterActor) throws IOException {
-        log.info("Getting member status for {}", clusterActor.getNode());
-        String content = getRequestContent(httpProtocol + clusterActor.getNode() + jolokiaClusterPath, HttpMethod.GET);
-        try {
-            JSONObject responseJson = new JSONObject(content);
-            JSONObject responseValue = responseJson.getJSONObject(VALUE);
-            clusterActor.setUp("Up".equals(responseValue.getString("MemberStatus")));
-            clusterActor.setUnreachable(false);
-        } catch(JSONException e) {
-            log.error("Error parsing response from {}", clusterActor.getNode(), e);
-            clusterActor.setUp(false);
-            clusterActor.setUnreachable(true);
-        }
-    }
-
-    private void getShardStatus(ClusterActor clusterActor) throws IOException {
-        log.info("Getting shard status for {}", clusterActor.getNode());
-        String content = getRequestContent(httpProtocol + clusterActor.getNode() + shardManagerPath, HttpMethod.GET);
-        try {
-            JSONObject responseValue = new JSONObject(content).getJSONObject(VALUE);
-            JSONArray shardList = responseValue.getJSONArray("LocalShards");
-
-            String pattern = "-config$";
-            Pattern r = Pattern.compile(pattern);
-            Matcher m;
-            for(int ndx = 0; ndx < shardList.length(); ndx++) {
-                String configShardName = shardList.getString(ndx);
-                m = r.matcher(configShardName);
-                String operationalShardName = m.replaceFirst("-operational");
-                String shardConfigPath = String.format(shardPathTemplate, configShardName);
-                String shardOperationalPath = String.format(shardPathTemplate, operationalShardName).replace("Config", "Operational");
-                extractShardInfo(clusterActor, configShardName, shardConfigPath);
-                extractShardInfo(clusterActor, operationalShardName, shardOperationalPath);
-            }
-        } catch(JSONException e) {
-            log.error("Error parsing response from " + clusterActor.getNode(), e);
-        }
-    }
-
-    private void extractShardInfo(ClusterActor clusterActor, String shardName, String shardPath) throws IOException {
-        log.info("Extracting shard info for {}", shardName);
-        log.debug("Pulling config info for {} from: {}", shardName, shardPath);
-        String content = getRequestContent(httpProtocol + clusterActor.getNode() + shardPath, HttpMethod.GET);
-        log.debug("Response: {}", content);
-
+    /**
+     * Creates the specific health resolver requested by the user, as specified
+     * in the gr-toolkit.properties file. If a resolver is not specified, or
+     * there is an issue creating the resolver, it will use a fallback resolver
+     * based on how many nodes are added to the memberMap HashMap.
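+     * <p>
+     * A user defined resolver is referenced by its fully qualified class name
+     * under the property key defined by {@code PropertyKeys.RESOLVER}, and is
+     * expected to provide a {@code (Map, Properties, DbLibService)}
+     * constructor to satisfy the reflective instantiation below.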
+     *
+     * @see HealthResolver
+     * @see SingleNodeHealthResolver
+     * @see ThreeNodeHealthResolver
+     * @see SixNodeHealthResolver
+     */
+    private void createHealthResolver() {
+        log.info("createHealthResolver(): Creating health resolver...");
         try {
-            JSONObject shardValue = new JSONObject(content).getJSONObject(VALUE);
-            clusterActor.setVoting(shardValue.getBoolean("Voting"));
-            if(shardValue.getString("PeerAddresses").length() > 0) {
-                clusterActor.getReplicaShards().add(shardName);
-                if(shardValue.getString("Leader").startsWith(clusterActor.getMember())) {
-                    clusterActor.getShardLeader().add(shardName);
-                }
-            }
-            else {
-                clusterActor.getNonReplicaShards().add(shardName);
+            Class resolverClass = null;
+            String userDefinedResolver = properties.getProperty(PropertyKeys.RESOLVER);
+            if(StringUtils.isEmpty(userDefinedResolver)) {
+                throw new InstantiationException();
             }
-            JSONArray followerInfo = shardValue.getJSONArray("FollowerInfo");
-            for(int followerNdx = 0; followerNdx < followerInfo.length(); followerNdx++) {
-                int commitIndex = shardValue.getInt("CommitIndex");
-                int matchIndex = followerInfo.getJSONObject(followerNdx).getInt("matchIndex");
-                if(commitIndex != -1 && matchIndex != -1) {
-                    int commitsBehind = commitIndex - matchIndex;
-                    clusterActor.getCommits().put(followerInfo.getJSONObject(followerNdx).getString("id"), commitsBehind);
-                }
-            }
-        } catch(JSONException e) {
-            log.error("Error parsing response from " + clusterActor.getNode(), e);
-        }
-    }
-
-    private void getControllerHealth() {
-        for(Map.Entry<String, ClusterActor> entry : memberMap.entrySet()) {
-            ClusterActor clusterActor = entry.getValue();
-            String key = entry.getKey();
-            try {
-                // First flush out the old values
-                clusterActor.flush();
-                log.info("Gathering info for {}", clusterActor.getNode());
-                getMemberStatus(clusterActor);
-                getShardStatus(clusterActor);
-                log.info("MemberInfo:\n{}", clusterActor);
-            } catch(IOException e) {
-                log.error("Connection Error", e);
-                memberMap.get(key).setUnreachable(true);
-                memberMap.get(key).setUp(false);
-                log.info("MemberInfo:\n{}", memberMap.get(key));
+            resolverClass = Class.forName(userDefinedResolver);
+            Class[] types = { Map.class, properties.getClass(), DbLibService.class };
+            Constructor<HealthResolver> constructor = resolverClass.getConstructor(types);
+            Object[] parameters = { memberMap, properties, dbLib };
+            resolver = constructor.newInstance(parameters);
+            log.info("createHealthResolver(): Created resolver from name {}", resolver.toString());
+        } catch(ClassNotFoundException | InstantiationException | InvocationTargetException | NoSuchMethodException | IllegalAccessException e) {
+            log.warn("createHealthResolver(): Could not create user defined resolver", e);
+            if(memberMap.size() == 1) {
+                log.info("createHealthResolver(): FALLBACK: Initializing SingleNodeHealthResolver...");
+                resolver  = new SingleNodeHealthResolver(memberMap, properties, dbLib);
+            } else if(memberMap.size() == 3) {
+                log.info("createHealthResolver(): FALLBACK: Initializing ThreeNodeHealthResolver...");
+                resolver  = new ThreeNodeHealthResolver(memberMap, properties, dbLib);
+            } else if(memberMap.size() == 6) {
+                log.info("createHealthResolver(): FALLBACK: Initializing SixNodeHealthResolver...");
+                resolver  = new SixNodeHealthResolver(memberMap, properties, dbLib);
             }
         }
     }
 
+    /**
+     * Adds or drops IPTables rules to block or resume akka traffic for a node
+     * in the akka cluster. Assumes that the user or group that the controller
+     * is run as has the ability to run sudo /sbin/iptables without requiring a
+     * password. This method will run indefinitely if that assumption is not
+     * correct. It does not check whether any rules for the node already
+     * exist, so multiple uses will result in multiple additions to and
+     * removals from IPTables.
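+     * <p>
+     * An illustrative sudoers entry satisfying that assumption (the user name
+     * is a placeholder) would be:
+     * <pre>
+     * odluser ALL=(ALL) NOPASSWD: /sbin/iptables
+     * </pre>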
+     *
+     * @param task the operation to be performed against IPTables
+     * @param nodeInfo array containing the nodes to be added or dropped from
+     *                 IPTables
+     */
     private void modifyIpTables(IpTables task, Object[] nodeInfo) {
-        log.info("Modifying IPTables rules...");
+        log.info("modifyIpTables(): Modifying IPTables rules...");
         if(task == IpTables.ADD) {
             for(Object node : nodeInfo) {
                 org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.halt.akka.traffic.input.NodeInfo n =
                         (org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.halt.akka.traffic.input.NodeInfo) node;
-                log.info("Isolating {}", n.getNode());
+                log.info("modifyIpTables(): Isolating {}", n.getNode());
                 executeCommand(String.format("sudo /sbin/iptables -A INPUT -p tcp --destination-port %s -j DROP -s %s", properties.get(PropertyKeys.CONTROLLER_PORT_AKKA), n.getNode()));
                 executeCommand(String.format("sudo /sbin/iptables -A OUTPUT -p tcp --destination-port %s -j DROP -d %s", n.getPort(), n.getNode()));
             }
-
         } else if(task == IpTables.DELETE) {
             for(Object node : nodeInfo) {
                 org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.resume.akka.traffic.input.NodeInfo n =
                         (org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.resume.akka.traffic.input.NodeInfo) node;
-                log.info("De-isolating {}", n.getNode());
+                log.info("modifyIpTables(): De-isolating {}", n.getNode());
                 executeCommand(String.format("sudo /sbin/iptables -D INPUT -p tcp --destination-port %s -j DROP -s %s", properties.get(PropertyKeys.CONTROLLER_PORT_AKKA), n.getNode()));
                 executeCommand(String.format("sudo /sbin/iptables -D OUTPUT -p tcp --destination-port %s -j DROP -d %s", n.getPort(), n.getNode()));
             }
-
         }
-        executeCommand("sudo /sbin/iptables -L");
+        if(nodeInfo.length > 0) {
+            executeCommand("sudo /sbin/iptables -L");
+        }
     }
 
+    /**
+     * Executes a command in a separate process. The command string is split
+     * on spaces and passed directly to {@code Runtime.exec()}, so no shell
+     * expansion is performed.
+     *
+     * @param command the command to execute
+     */
     private void executeCommand(String command) {
-        log.info("Executing command: {}", command);
+        log.info("executeCommand(): Executing command: {}", command);
         String[] cmd = command.split(" ");
         try {
             Process p = Runtime.getRuntime().exec(cmd);
                 content.append(inputLine);
             }
             bufferedReader.close();
-            log.info("{}", content);
-        } catch(IOException e) {
-            log.error("Error executing command", e);
-        }
-    }
-
-    private boolean crossSiteHealthRequest(String path) throws IOException {
-        String content = getRequestContent(path, HttpMethod.POST);
-        try {
-            JSONObject responseJson = new JSONObject(content);
-            JSONObject responseValue = responseJson.getJSONObject(OUTPUT);
-            return HEALTHY.equals(responseValue.getString("health"));
-        } catch(JSONException e) {
-            log.error("Error parsing JSON", e);
-            throw new IOException();
-        }
-    }
-
-    private String getAdminHealth() {
-        String protocol = "true".equals(properties.getProperty(PropertyKeys.ADM_USE_SSL)) ? "https://" : "http://";
-        String port = "true".equals(properties.getProperty(PropertyKeys.ADM_USE_SSL)) ? properties.getProperty(PropertyKeys.ADM_PORT_SSL) : properties.getProperty(PropertyKeys.ADM_PORT_HTTP);
-        String path = protocol + properties.getProperty(PropertyKeys.ADM_FQDN) + ":" + port + properties.getProperty(PropertyKeys.ADM_HEALTHCHECK);
-        log.info("Requesting healthcheck from {}", path);
-        try {
-            int response = getRequestStatus(path, HttpMethod.GET);
-            log.info("Response: {}", response);
-            if(response == 200)
-                return HEALTHY;
-            return FAULTY;
+            log.info("executeCommand(): {}", content);
         } catch(IOException e) {
-            log.error("Problem getting ADM health.", e);
-            return FAULTY;
-        }
-    }
-
-    private String getDatabaseHealth() {
-        log.info("Determining database health...");
-        try {
-            Connection connection = dbLib.getConnection();
-            log.debug("DBLib isActive(): {}", dbLib.isActive());
-            log.debug("DBLib isReadOnly(): {}", connection.isReadOnly());
-            log.debug("DBLib isClosed(): {}", connection.isClosed());
-            if(!dbLib.isActive() || connection.isClosed() || connection.isReadOnly()) {
-                log.warn("Database is FAULTY");
-                connection.close();
-                return FAULTY;
-            }
-            connection.close();
-            log.info("Database is HEALTHY");
-        } catch(SQLException e) {
-            log.error("Database is FAULTY");
-            log.error("Error", e);
-            return FAULTY;
-        }
-
-        return HEALTHY;
-    }
-
-    private String getRequestContent(String path, HttpMethod method) throws IOException {
-        return getRequestContent(path, method, null);
-    }
-
-    private String getRequestContent(String path, HttpMethod method, String input) throws IOException {
-        HttpURLConnection connection = getConnection(path);
-        connection.setRequestMethod(method.getMethod());
-        connection.setDoInput(true);
-
-        if(input != null) {
-            sendPayload(input, connection);
-        }
-
-        BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(connection.getInputStream()));
-        String inputLine;
-        StringBuilder content = new StringBuilder();
-        while((inputLine = bufferedReader.readLine()) != null) {
-            content.append(inputLine);
-        }
-        bufferedReader.close();
-        connection.disconnect();
-
-        String response = content.toString();
-        log.debug("getRequestContent(): Response:\n{}", response);
-        return response;
-    }
-
-    private int getRequestStatus(String path, HttpMethod method) throws IOException {
-        return getRequestStatus(path, method, null);
-    }
-
-    private int getRequestStatus(String path, HttpMethod method, String input) throws IOException {
-        HttpURLConnection connection = getConnection(path);
-        connection.setRequestMethod(method.getMethod());
-        connection.setDoInput(true);
-
-        if(input != null) {
-            sendPayload(input, connection);
-        }
-        int response = connection.getResponseCode();
-        log.info("Received {} response code from {}", response, path);
-        connection.disconnect();
-        return response;
-    }
-
-    private void sendPayload(String input, HttpURLConnection connection) throws IOException {
-        byte[] out = input.getBytes(StandardCharsets.UTF_8);
-        int length = out.length;
-
-        connection.setFixedLengthStreamingMode(length);
-        connection.setRequestProperty("Content-Type", "application/json");
-        connection.setDoOutput(true);
-        connection.connect();
-        try(OutputStream os = connection.getOutputStream()) {
-            os.write(out);
+            log.error("executeCommand(): Error executing command", e);
         }
     }
 
-    private HttpURLConnection getConnection(String host) throws IOException {
-        log.info("Getting connection to: {}", host);
-        URL url = new URL(host);
-        String auth = "Basic " + javax.xml.bind.DatatypeConverter.printBase64Binary(credentials.getBytes());
-        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
-        connection.addRequestProperty("Authorization", auth);
-        connection.setRequestProperty("Connection", "keep-alive");
-        connection.setRequestProperty("Proxy-Connection", "keep-alive");
-        connection.setConnectTimeout(CONNECTION_TIMEOUT);
-        connection.setReadTimeout(CONNECTION_TIMEOUT);
-        return connection;
-    }
-
+    /**
+     * The IPTables operations this module can perform.
+     */
     enum IpTables {
         ADD,
         DELETE
     }
-
-    enum SiteConfiguration {
-        SOLO,
-        SINGLE,
-        GEO
-    }
-
-    enum HttpMethod {
-        GET("GET"),
-        POST("POST");
-
-        private String method;
-        HttpMethod(String method) {
-            this.method = method;
-        }
-        public String getMethod() {
-            return method;
-        }
-    }
-
-    class PropertyKeys {
-        static final String SITE_IDENTIFIER = "site.identifier";
-        static final String CONTROLLER_USE_SSL = "controller.useSsl";
-        static final String CONTROLLER_PORT_SSL = "controller.port.ssl";
-        static final String CONTROLLER_PORT_HTTP = "controller.port.http";
-        static final String CONTROLLER_PORT_AKKA = "controller.port.akka";
-        static final String CONTROLLER_CREDENTIALS = "controller.credentials";
-        static final String AKKA_CONF_LOCATION = "akka.conf.location";
-        static final String MBEAN_CLUSTER = "mbean.cluster";
-        static final String MBEAN_SHARD_MANAGER  = "mbean.shardManager";
-        static final String MBEAN_SHARD_CONFIG = "mbean.shard.config";
-        static final String ADM_USE_SSL = "adm.useSsl";
-        static final String ADM_PORT_SSL = "adm.port.ssl";
-        static final String ADM_PORT_HTTP = "adm.port.http";
-        static final String ADM_FQDN = "adm.fqdn";
-        static final String ADM_HEALTHCHECK= "adm.healthcheck";
-    }
 }
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.connection;
+
+import org.apache.commons.lang.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Provides HTTP connections to resources and can send JSON payloads. Only
+ * basic authorization is supported when credentials are supplied.
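+ * <p>
+ * A minimal usage sketch; the URL, credentials, and {@code log} reference
+ * below are placeholders:
+ * <pre>{@code
+ * try {
+ *     ConnectionResponse response = ConnectionManager.getConnectionResponse(
+ *             "http://localhost:8181/some/health/endpoint",
+ *             ConnectionManager.HttpMethod.GET, null, "admin:admin");
+ *     log.info("Status {}: {}", response.statusCode, response.content);
+ * } catch(IOException e) {
+ *     log.error("Health check failed", e);
+ * }
+ * }</pre>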
+ *
+ * @author Anthony Haddox
+ * @see ConnectionResponse
+ */
+public interface ConnectionManager {
+    Logger log = LoggerFactory.getLogger(ConnectionManager.class);
+    int CONNECTION_TIMEOUT = 5000; // 5 second timeout
+    enum HttpMethod {
+        GET("GET"),
+        POST("POST");
+
+        private final String method;
+        HttpMethod(String method) {
+            this.method = method;
+        }
+        String getMethod() {
+            return method;
+        }
+    }
+
+    /**
+     * Writes a JSON payload to an {@code HttpURLConnection} output stream.
+     *
+     * @param input the JSON payload to send
+     * @param connection the {@code HttpURLConnection} to write to
+     * @throws IOException if there is a problem writing to the output stream
+     */
+    static void sendPayload(String input, HttpURLConnection connection) throws IOException {
+        byte[] out = input.getBytes(StandardCharsets.UTF_8);
+        int length = out.length;
+
+        connection.setFixedLengthStreamingMode(length);
+        connection.setRequestProperty("Content-Type", "application/json");
+        connection.setDoOutput(true);
+        connection.connect();
+        try(OutputStream os = connection.getOutputStream()) {
+            os.write(out);
+        }
+    }
+
+    /**
+     * Gets an {@code HttpURLConnection} to a {@code host}.
+     *
+     * @param host the host to connect to
+     * @return an {@code HttpURLConnection}
+     * @throws IOException if a connection cannot be opened
+     */
+    static HttpURLConnection getConnection(String host) throws IOException {
+        log.info("getConnection(): Getting connection to: {}", host);
+        URL url = new URL(host);
+        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+        connection.setRequestProperty("Connection", "keep-alive");
+        connection.setRequestProperty("Proxy-Connection", "keep-alive");
+        connection.setConnectTimeout(CONNECTION_TIMEOUT);
+        connection.setReadTimeout(CONNECTION_TIMEOUT);
+        return connection;
+    }
+
+    /**
+     * Gets an {@code HttpURLConnection} to a {@code host} and sets the
+     * Authorization header with the supplied credentials. Only basic
+     * authentication is supported.
+     *
+     * @param host the host to connect to
+     * @param credentials the authorization credentials
+     * @return an {@code HttpURLConnection} with the Authorization header set
+     * @throws IOException if a connection cannot be opened
+     */
+    static HttpURLConnection getConnection(String host, String credentials) throws IOException {
+        String auth = "Basic " + javax.xml.bind.DatatypeConverter.printBase64Binary(credentials.getBytes());
+        HttpURLConnection connection = getConnection(host);
+        connection.addRequestProperty("Authorization", auth);
+        credentials = null;
+        auth = null;
+        return connection;
+    }
+
+    /**
+     * Opens a connection to a path, sends a payload (if one is supplied),
+     * and returns the response.
+     *
+     * @param path the full URL to connect to
+     * @param method the {@code HttpMethod} to use
+     * @param input the payload to send; may be null or empty
+     * @param credentials the credentials to use; may be null or empty
+     * @return a {@code ConnectionResponse} containing the response body and
+     *         status code of the operation
+     * @throws IOException if a connection cannot be opened or if the payload
+     *                     cannot be sent
+     * @see HttpMethod
+     */
+    static ConnectionResponse getConnectionResponse(String path, HttpMethod method, String input, String credentials) throws IOException {
+        HttpURLConnection connection = (StringUtils.isEmpty(credentials)) ? getConnection(path) : getConnection(path, credentials);
+        credentials = null;
+        connection.setRequestMethod(method.getMethod());
+        connection.setDoInput(true);
+
+        if(!StringUtils.isEmpty(input)) {
+            sendPayload(input, connection);
+        }
+
+        StringBuilder content = new StringBuilder();
+        try(BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(connection.getInputStream()))) {
+            String inputLine;
+            while((inputLine = bufferedReader.readLine()) != null) {
+                content.append(inputLine);
+            }
+        } finally {
+            connection.disconnect();
+        }
+
+        ConnectionResponse connectionResponse = new ConnectionResponse();
+        connectionResponse.content = content.toString();
+        connectionResponse.statusCode = connection.getResponseCode();
+        log.info("getConnectionResponse(): {} response code from {}", connectionResponse.statusCode, path);
+        log.debug("getConnectionResponse(): Response:\n{}", connectionResponse.content);
+        return connectionResponse;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.connection;
+
+/**
+ * A data container for HTTP connection requests.
+ *
+ * @author Anthony Haddox
+ * @see ConnectionManager
+ */
+public class ConnectionResponse {
+    public int statusCode;
+    public String content;
+
+    public ConnectionResponse withStatusCode(int statusCode) {
+        this.statusCode = statusCode;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "Status: " + statusCode + "\nContent: " + content;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+/**
+ * A data container for Admin health.
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */
+public class AdminHealth {
+    private Health health;
+    private int statusCode;
+
+    public AdminHealth(Health health) {
+        this.health = health;
+    }
+
+    public AdminHealth(Health health, int statusCode) {
+        this.health = health;
+        this.statusCode = statusCode;
+    }
+
+    public Health getHealth() {
+        return health;
+    }
+
+    public void setHealth(Health health) {
+        this.health = health;
+    }
+
+    public int getStatusCode() {
+        return statusCode;
+    }
+
+    public void setStatusCode(int statusCode) {
+        this.statusCode = statusCode;
+    }
+}
 
 import java.util.List;
 import java.util.Map;
 
+/**
+ * A data container with information about an actor in the Akka cluster.
+ *
+ * @author Anthony Haddox
+ */
 public class ClusterActor {
     private String node;
     private String member;
             builder.append(" Up");
         else
             builder.append(" Down");
-        if(unreachable)
+        if(unreachable) {
             builder.append(" [ UNREACHABLE ]");
+            return builder.toString();
+        }
 
         if(voting)
             builder.append(" (Voting)");
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+/**
+ * A data container for Akka Cluster health.
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */
+public class ClusterHealth {
+    private Health health;
+
+    public ClusterHealth() {
+        health = Health.FAULTY;
+    }
+
+    public ClusterHealth withHealth(Health h) {
+        this.health = h;
+        return this;
+    }
+
+    public Health getHealth() {
+        return health;
+    }
+
+    public void setHealth(Health health) {
+        this.health = health;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+/**
+ * A data container for Database health.
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */
+public class DatabaseHealth {
+    private Health health;
+
+    public DatabaseHealth(Health health) {
+        this.health = health;
+    }
+
+    public Health getHealth() {
+        return health;
+    }
+
+    public void setHealth(Health health) {
+        this.health = health;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+/**
+ * A data container for the status of a controller-level failover.
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */
+public class FailoverStatus {
+    private int statusCode;
+    private String message;
+
+    public FailoverStatus() {
+        this.statusCode = 200;
+        this.message = "Failover complete.";
+    }
+
+    public FailoverStatus withStatusCode(int code) {
+        this.statusCode = code;
+        return this;
+    }
+
+    public FailoverStatus withMessage(String message) {
+        this.message = message;
+        return this;
+    }
+
+    public int getStatusCode() {
+        return statusCode;
+    }
+
+    public void setStatusCode(int statusCode) {
+        this.statusCode = statusCode;
+    }
+
+    public String getMessage() {
+        return message;
+    }
+
+    public void setMessage(String message) {
+        this.message = message;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+/**
+ * Potential health values.
+ *
+ * @author Anthony Haddox
+ */
+public enum Health {
+    HEALTHY("HEALTHY"),
+    FAULTY("FAULTY");
+
+    private final String health;
+    Health(String health) {
+        this.health = health;
+    }
+    public String getHealth() {
+        return health;
+    }
+}
 
 import java.util.List;
 import java.util.Map;
 
+/**
+ * Extends the {@code MembersBuilder} generated from the gr-toolkit.yang model.
+ * Uses information from a {@code ClusterActor} to populate the builder fields.
+ *
+ * @author Anthony Haddox
+ * @see ClusterActor
+ * @see org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.cluster.health.output.MembersBuilder
+ */
 public class MemberBuilder extends MembersBuilder {
     public MemberBuilder(ClusterActor actor) {
         super();
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
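+/**
+ * Property keys expected in the GR Toolkit properties configuration.
+ *
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */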
+public interface PropertyKeys {
+    String RESOLVER = "resolver";
+    String SITE_IDENTIFIER = "site.identifier";
+    String CONTROLLER_USE_SSL = "controller.useSsl";
+    String CONTROLLER_PORT_SSL = "controller.port.ssl";
+    String CONTROLLER_PORT_HTTP = "controller.port.http";
+    String CONTROLLER_PORT_AKKA = "controller.port.akka";
+    String CONTROLLER_CREDENTIALS = "controller.credentials";
+    String AKKA_CONF_LOCATION = "akka.conf.location";
+    String MBEAN_CLUSTER = "mbean.cluster";
+    String MBEAN_SHARD_MANAGER = "mbean.shardManager";
+    String MBEAN_SHARD_CONFIG = "mbean.shard.config";
+    String ADM_USE_SSL = "adm.useSsl";
+    String ADM_PORT_SSL = "adm.port.ssl";
+    String ADM_PORT_HTTP = "adm.port.http";
+    String ADM_FQDN = "adm.fqdn";
+    String ADM_HEALTHCHECK = "adm.healthcheck";
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+
+/**
+ * A data container for Site health.
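+ * <p>
+ * Instances are typically assembled with the fluent {@code with*} methods;
+ * a brief sketch with illustrative values:
+ * <pre>{@code
+ * SiteHealth site = new SiteHealth()
+ *         .withId("site-1")
+ *         .withRole("ACTIVE")
+ *         .withAdminHealth(adminHealth)
+ *         .withDatabaseHealth(databaseHealth)
+ *         .withClusterHealth(clusterHealth);
+ * }</pre>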
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.resolver.HealthResolver
+ */
+public class SiteHealth {
+    private List<AdminHealth> adminHealth;
+    private List<DatabaseHealth> databaseHealth;
+    private List<ClusterHealth> clusterHealth;
+
+    private Health health;
+    private String id;
+    private String role;
+
+    public SiteHealth() {
+        adminHealth = new ArrayList<>();
+        databaseHealth = new ArrayList<>();
+        clusterHealth = new ArrayList<>();
+
+        // Faulty by default, it's up to the health check to affirm the health
+        health = Health.FAULTY;
+    }
+
+    public SiteHealth withAdminHealth(AdminHealth... health) {
+        Collections.addAll(adminHealth, health);
+        return this;
+    }
+
+    public SiteHealth withDatabaseHealth(DatabaseHealth... health) {
+        Collections.addAll(databaseHealth, health);
+        return this;
+    }
+
+    public SiteHealth withClusterHealth(ClusterHealth... health) {
+        Collections.addAll(clusterHealth, health);
+        return this;
+    }
+
+    public SiteHealth withId(String id) {
+        this.id = id;
+        return this;
+    }
+
+    public SiteHealth withRole(String role) {
+        this.role = role;
+        return this;
+    }
+
+    public Health getHealth() {
+        return health;
+    }
+
+    public void setHealth(Health health) {
+        this.health = health;
+    }
+
+    public List<AdminHealth> getAdminHealth() {
+        return adminHealth;
+    }
+
+    public void setAdminHealth(List<AdminHealth> adminHealth) {
+        this.adminHealth = adminHealth;
+    }
+
+    public List<DatabaseHealth> getDatabaseHealth() {
+        return databaseHealth;
+    }
+
+    public void setDatabaseHealth(List<DatabaseHealth> databaseHealth) {
+        this.databaseHealth = databaseHealth;
+    }
+
+    public List<ClusterHealth> getClusterHealth() {
+        return clusterHealth;
+    }
+
+    public void setClusterHealth(List<ClusterHealth> clusterHealth) {
+        this.clusterHealth = clusterHealth;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(String id) {
+        this.id = id;
+    }
+
+    public String getRole() {
+        return role;
+    }
+
+    public void setRole(String role) {
+        this.role = role;
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionManager;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionResponse;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.PropertyKeys;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.onap.ccsdk.sli.plugins.grtoolkit.data.Health.HEALTHY;
+
+/**
+ * Abstract class for the Health Resolver system, which allows for custom logic
+ * to be implemented, while leaving inputs/outputs generic and architecture
+ * agnostic. This class provides some simple implementations of both Admin and
+ * Database health checking, but leaves cluster and site health determinations
+ * up to the implementer. Useful implementation examples can be found in the
+ * {@code SingleNodeHealthResolver}, {@code ThreeNodeHealthResolver}, and
+ * {@code SixNodeHealthResolver} classes.
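+ * <p>
+ * As a rough sketch, a custom resolver (hypothetical class name, placed in
+ * this package so it can reach the package-private constructor) would look
+ * like:
+ * <pre>{@code
+ * public class CustomHealthResolver extends HealthResolver {
+ *     public CustomHealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
+ *         super(map, properties, dbLib);
+ *         resolveSites();
+ *     }
+ *     // Implement getClusterHealth(), getSiteHealth(), tryFailover(), and resolveSites()
+ * }
+ * }</pre>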
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+ * @see SingleNodeHealthResolver
+ * @see ThreeNodeHealthResolver
+ * @see SixNodeHealthResolver
+ */
+public abstract class HealthResolver {
+    private final Logger log = LoggerFactory.getLogger(HealthResolver.class);
+    static final String OUTPUT = "output";
+    final String httpProtocol;
+    final String controllerPort;
+    final String credentials;
+    final Map<String, ClusterActor> memberMap;
+    private DbLibService dbLib;
+    final ShardResolver shardResolver;
+    private String adminPath;
+    private String siteIdentifier;
+
+    /**
+     * Constructs the health resolver used by the {@code GrToolkitProvider} to
+     * determine the health of the application components.
+     *
+     * @param map a HashMap containing all of the nodes in the Akka cluster
+     * @param properties the properties passed into the provider
+     * @param dbLib a reference to the {@code DbLibService} of the provider
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     */
+    HealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
+        log.info("Creating {}", this.getClass().getCanonicalName());
+        this.memberMap = map;
+        this.dbLib = dbLib;
+        shardResolver = ShardResolver.getInstance(properties);
+
+        String adminProtocol = "true".equals(properties.getProperty(PropertyKeys.ADM_USE_SSL)) ? "https://" : "http://";
+        String adminPort = "true".equals(properties.getProperty(PropertyKeys.ADM_USE_SSL)) ? properties.getProperty(PropertyKeys.ADM_PORT_SSL) : properties.getProperty(PropertyKeys.ADM_PORT_HTTP);
+        adminPath = adminProtocol + properties.getProperty(PropertyKeys.ADM_FQDN) + ":" + adminPort + properties.getProperty(PropertyKeys.ADM_HEALTHCHECK);
+        siteIdentifier = properties.getProperty(PropertyKeys.SITE_IDENTIFIER, "").trim();
+
+        controllerPort = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? properties.getProperty(PropertyKeys.CONTROLLER_PORT_SSL).trim() : properties.getProperty(PropertyKeys.CONTROLLER_PORT_HTTP).trim();
+        httpProtocol = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? "https://" : "http://";
+        credentials = properties.getProperty(PropertyKeys.CONTROLLER_CREDENTIALS).trim();
+    }
+
+    public abstract ClusterHealth getClusterHealth();
+    public abstract List<SiteHealth> getSiteHealth();
+    public abstract FailoverStatus tryFailover(FailoverInput input);
+    public abstract void resolveSites();
+
+    /**
+     * Gets a connection to the admin portal. If the status code is 200, the
+     * admin portal is assumed to be healthy.
+     *
+     * @return an {@code AdminHealth} object with the health of the admin portal
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see AdminHealth
+     */
+    public AdminHealth getAdminHealth() {
+        log.info("getAdminHealth(): Requesting health check from {}", adminPath);
+        try {
+            ConnectionResponse response = ConnectionManager.getConnectionResponse(adminPath, ConnectionManager.HttpMethod.GET, null, null);
+            Health health = (response.statusCode == 200) ? HEALTHY : Health.FAULTY;
+            AdminHealth adminHealth = new AdminHealth(health, response.statusCode);
+            log.info("getAdminHealth(): Response: {}", response);
+            return adminHealth;
+        } catch(IOException e) {
+            log.error("getAdminHealth(): Problem getting ADM health.", e);
+            return new AdminHealth(Health.FAULTY, 500);
+        }
+    }
+
+    /**
+     * Uses {@code DbLibService} to get a connection to the database. If
+     * {@code DbLibService} is active and the connection it returns is not
+     * read only, the database is assumed to be healthy.
+     *
+     * @return a {@code DatabaseHealth} object with the health of the database
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see DatabaseHealth
+     */
+    public DatabaseHealth getDatabaseHealth() {
+        log.info("getDatabaseHealth(): Determining database health...");
+        try (Connection connection = dbLib.getConnection()){
+            log.debug("getDatabaseHealth(): DBLib isActive(): {}", dbLib.isActive());
+            log.debug("getDatabaseHealth(): DBLib isReadOnly(): {}", connection.isReadOnly());
+            log.debug("getDatabaseHealth(): DBLib isClosed(): {}", connection.isClosed());
+            if(!dbLib.isActive() || connection.isClosed() || connection.isReadOnly()) {
+                log.warn("getDatabaseHealth(): Database is FAULTY");
+                return new DatabaseHealth(Health.FAULTY);
+            }
+            log.info("getDatabaseHealth(): Database is HEALTHY");
+        } catch(SQLException e) {
+            log.error("getDatabaseHealth(): Database is FAULTY");
+            log.error("getDatabaseHealth(): Error", e);
+            return new DatabaseHealth(Health.FAULTY);
+        }
+
+        return new DatabaseHealth(HEALTHY);
+    }
+
+    /**
+     * Utility method to check whether a {@code Health} value is healthy.
+     *
+     * @param h the health value to check
+     * @return true if the input is healthy
+     * @see Health
+     */
+    boolean isHealthy(Health h) {
+        return HEALTHY == h;
+    }
+
+    public String getSiteIdentifier() {
+        return siteIdentifier;
+    }
+
+    public void setSiteIdentifier(String siteIdentifier) {
+        this.siteIdentifier = siteIdentifier;
+    }
+
+    /**
+     * Used to invoke the admin-health or database-health RPC to check if that
+     * component is healthy.
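+     * <p>
+     * The response body is expected to carry the health in a structure
+     * similar to the following (illustrative):
+     * <pre>{@code
+     * { "output": { "health": "HEALTHY" } }
+     * }</pre>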
+     *
+     * @param path the path to the admin-health or database-health RPCs
+     * @return true if the component is healthy
+     * @throws IOException if a connection cannot be obtained
+     */
+    boolean isRemoteComponentHealthy(String path) throws IOException {
+        String content = ConnectionManager.getConnectionResponse(path, ConnectionManager.HttpMethod.POST, null, credentials).content;
+        try {
+            JSONObject responseJson = new JSONObject(content);
+            JSONObject responseValue = responseJson.getJSONObject(OUTPUT);
+            return HEALTHY.toString().equals(responseValue.getString("health"));
+        } catch(JSONException e) {
+            log.error("Error parsing JSON", e);
+            throw new IOException();
+        }
+    }
+
+    /**
+     * Checks a {@code ClusterActor} object to see if the node is healthy.
+     *
+     * @param controller the controller to check
+     * @return true if the controller is up and reachable
+     * @see ClusterActor
+     */
+    public boolean isControllerHealthy(ClusterActor controller) {
+        return (controller.isUp() && ! controller.isUnreachable());
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import org.json.JSONArray;
+import org.json.JSONException;
+import org.json.JSONObject;
+
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionManager;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionResponse;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.PropertyKeys;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Used to perform operations on the data shard information returned as JSON
+ * from Jolokia.
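+ * <p>
+ * Obtained as a singleton; a typical interaction, assuming {@code properties}
+ * and {@code memberMap} are already in scope, is:
+ * <pre>{@code
+ * ShardResolver shardResolver = ShardResolver.getInstance(properties);
+ * shardResolver.getControllerHealth(memberMap);
+ * }</pre>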
+ *
+ * @author Anthony Haddox
+ * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+ * @see HealthResolver
+ */
+public class ShardResolver {
+    private final Logger log = LoggerFactory.getLogger(ShardResolver.class);
+    private static ShardResolver _shardResolver;
+
+    private String jolokiaClusterPath;
+    private String shardManagerPath;
+    private String shardPathTemplate;
+    private String credentials;
+    private String httpProtocol;
+
+    private static final String VALUE = "value";
+
+    private ShardResolver(Properties properties) {
+        String port = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? properties.getProperty(PropertyKeys.CONTROLLER_PORT_SSL).trim() : properties.getProperty(PropertyKeys.CONTROLLER_PORT_HTTP).trim();
+        httpProtocol = "true".equals(properties.getProperty(PropertyKeys.CONTROLLER_USE_SSL).trim()) ? "https://" : "http://";
+        jolokiaClusterPath = ":" + port + properties.getProperty(PropertyKeys.MBEAN_CLUSTER).trim();
+        shardManagerPath = ":" + port + properties.getProperty(PropertyKeys.MBEAN_SHARD_MANAGER).trim();
+        shardPathTemplate = ":" + port + properties.getProperty(PropertyKeys.MBEAN_SHARD_CONFIG).trim();
+        credentials = properties.getProperty(PropertyKeys.CONTROLLER_CREDENTIALS).trim();
+    }
+
+    public static ShardResolver getInstance(Properties properties) {
+        if (_shardResolver == null) {
+            _shardResolver = new ShardResolver(properties);
+        }
+        return _shardResolver;
+    }
+
+    private void getMemberStatus(ClusterActor clusterActor) throws IOException {
+        log.info("getMemberStatus(): Getting member status for {}", clusterActor.getNode());
+        ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + clusterActor.getNode() + jolokiaClusterPath, ConnectionManager.HttpMethod.GET, null, credentials);
+        try {
+            JSONObject responseJson = new JSONObject(response.content);
+            JSONObject responseValue = responseJson.getJSONObject(VALUE);
+            clusterActor.setUp("Up".equals(responseValue.getString("MemberStatus")));
+            clusterActor.setUnreachable(false);
+        } catch(JSONException e) {
+            log.error("getMemberStatus(): Error parsing response from {}", clusterActor.getNode(), e);
+            clusterActor.setUp(false);
+            clusterActor.setUnreachable(true);
+        }
+    }
+
+    private void getShardStatus(ClusterActor clusterActor) throws IOException {
+        log.info("getShardStatus(): Getting shard status for {}", clusterActor.getNode());
+        ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + clusterActor.getNode() + shardManagerPath, ConnectionManager.HttpMethod.GET, null, credentials);
+        try {
+            JSONObject responseValue = new JSONObject(response.content).getJSONObject(VALUE);
+            JSONArray shardList = responseValue.getJSONArray("LocalShards");
+
+            String pattern = "-config$";
+            Pattern r = Pattern.compile(pattern);
+            List<String> shards = new ArrayList<>();
+            for(int ndx = 0; ndx < shardList.length(); ndx++) {
+                shards.add(shardList.getString(ndx));
+            }
+            shards.parallelStream().forEach(shard -> {
+                Matcher m = r.matcher(shard);
+                String operationalShardName = m.replaceFirst("-operational");
+                String shardConfigPath = String.format(shardPathTemplate, shard);
+                String shardOperationalPath = String.format(shardPathTemplate, operationalShardName).replace("Config", "Operational");
+                try {
+                    extractShardInfo(clusterActor, shard, shardConfigPath);
+                    extractShardInfo(clusterActor, operationalShardName, shardOperationalPath);
+                } catch(IOException e) {
+                    log.error("getShardStatus(): Error extracting shard info for {}", shard);
+                }
+            });
+        } catch(JSONException e) {
+            log.error("getShardStatus(): Error parsing response from " + clusterActor.getNode(), e);
+        }
+    }
+
+    private void extractShardInfo(ClusterActor clusterActor, String shardName, String shardPath) throws IOException {
+        log.info("extractShardInfo(): Extracting shard info for {}", shardName);
+        String shardPrefix = "";
+//        String shardPrefix = clusterActor.getMember() + "-shard-";
+        log.debug("extractShardInfo(): Pulling config info for {} from: {}", shardName, shardPath);
+        ConnectionResponse response = ConnectionManager.getConnectionResponse(httpProtocol + clusterActor.getNode() + shardPath, ConnectionManager.HttpMethod.GET, null, credentials);
+        log.debug("extractShardInfo(): Response: {}", response.content);
+
+        try {
+            JSONObject shardValue = new JSONObject(response.content).getJSONObject(VALUE);
+            clusterActor.setVoting(shardValue.getBoolean("Voting"));
+            if(shardValue.getString("PeerAddresses").length() > 0) {
+                clusterActor.getReplicaShards().add(shardName.replace(shardPrefix, ""));
+                if(shardValue.getString("Leader").startsWith(clusterActor.getMember())) {
+                    clusterActor.getShardLeader().add(shardName.replace(shardPrefix, ""));
+                }
+            } else {
+                clusterActor.getNonReplicaShards().add(shardName.replace(shardPrefix, ""));
+            }
+            JSONArray followerInfo = shardValue.getJSONArray("FollowerInfo");
+            for(int followerNdx = 0; followerNdx < followerInfo.length(); followerNdx++) {
+                int commitIndex = shardValue.getInt("CommitIndex");
+                int matchIndex = followerInfo.getJSONObject(followerNdx).getInt("matchIndex");
+                if(commitIndex != -1 && matchIndex != -1) {
+                    int commitsBehind = commitIndex - matchIndex;
+                    clusterActor.getCommits().put(followerInfo.getJSONObject(followerNdx).getString("id"), commitsBehind);
+                }
+            }
+        } catch(JSONException e) {
+            log.error("extractShardInfo(): Error parsing response from " + clusterActor.getNode(), e);
+        }
+    }
+
+    public void getControllerHealth(Map<String, ClusterActor> memberMap) {
+        memberMap.values().parallelStream().forEach(this::getControllerHealth);
+    }
+
+    // Synchronized because concurrent access issues have been observed while gathering controller health
+    private synchronized void getControllerHealth(ClusterActor clusterActor) {
+        // Flush out the old values before gathering fresh member and shard status
+        clusterActor.flush();
+        log.info("getControllerHealth(): Gathering info for {}", clusterActor.getNode());
+        try {
+            getMemberStatus(clusterActor);
+            getShardStatus(clusterActor);
+        } catch(IOException e) {
+            log.error("getControllerHealth(): Connection Error", e);
+            clusterActor.setUnreachable(true);
+            clusterActor.setUp(false);
+        }
+        log.info("getControllerHealth(): MemberInfo:\n{}", clusterActor);
+    }
+}
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Implementation of {@code HealthResolver} for a single node controller
+ * architecture.
+ *
+ * @author Anthony Haddox
+ * @see HealthResolver
+ */
+public class SingleNodeHealthResolver extends HealthResolver {
+    private final Logger log = LoggerFactory.getLogger(SingleNodeHealthResolver.class);
+
+    /**
+     * Constructs the health resolver used by the {@code GrToolkitProvider} to
+     * determine the health of the application components.
+     *
+     * @param map a HashMap containing all of the nodes in the Akka cluster
+     * @param properties the properties passed into the provider
+     * @param dbLib a reference to the {@code DbLibService} of the provider
+     * @see HealthResolver
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     */
+    public SingleNodeHealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
+        super(map, properties, dbLib);
+        resolveSites();
+    }
+
+    /**
+     * Implementation of {@code getClusterHealth()}. Uses the
+     * {@code ShardResolver} to gather health information about the controller.
+     * This method assumes the cluster is always healthy since it is a single
+     * node.
+     *
+     * @return a {@code ClusterHealth} object with the health of the Akka cluster
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see ClusterHealth
+     * @see ShardResolver
+     */
+    @Override
+    public ClusterHealth getClusterHealth() {
+        log.info("getClusterHealth(): Getting cluster health...");
+        shardResolver.getControllerHealth(memberMap);
+        return new ClusterHealth().withHealth(Health.HEALTHY);
+    }
+
+    /**
+     * Implementation of {@code getSiteHealth()}. Uses the results from
+     * {@code getAdminHealth}, {@code getDatabaseHealth}, and
+     * {@code getClusterHealth} to determine the health of the site. If all
+     * components are healthy, the site is healthy.
+     *
+     * @return a List of {@code SiteHealth} objects with the health of the site
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see SiteHealth
+     * @see ShardResolver
+     */
+    @Override
+    public List<SiteHealth> getSiteHealth() {
+        log.info("getSiteHealth(): Getting site health...");
+        AdminHealth adminHealth = getAdminHealth();
+        DatabaseHealth databaseHealth = getDatabaseHealth();
+        ClusterHealth clusterHealth = getClusterHealth();
+        SiteHealth siteHealth = new SiteHealth()
+                                        .withAdminHealth(adminHealth)
+                                        .withDatabaseHealth(databaseHealth)
+                                        .withClusterHealth(clusterHealth)
+                                        .withRole("ACTIVE")
+                                        .withId(getSiteIdentifier());
+        log.info("getSiteHealth(): Admin Health: {}", adminHealth.getHealth().toString());
+        log.info("getSiteHealth(): Database Health: {}", databaseHealth.getHealth().toString());
+        log.info("getSiteHealth(): Cluster Health: {}", clusterHealth.getHealth().toString());
+        if(isHealthy(adminHealth.getHealth()) && isHealthy(databaseHealth.getHealth()) && isHealthy(clusterHealth.getHealth())) {
+            siteHealth.setHealth(Health.HEALTHY);
+        }
+
+        return Collections.singletonList(siteHealth);
+    }
+
+    /**
+     * Implementation of {@code tryFailover()}. No controller-level failover
+     * options are available in a single node architecture, so 400 Bad Request
+     * is returned, and no action is taken.
+     *
+     * @return a {@code FailoverStatus} object with the outcome of the failover attempt
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see FailoverStatus
+     * @see FailoverInput
+     */
+    @Override
+    public FailoverStatus tryFailover(FailoverInput input) {
+        log.info("tryFailover(): Failover not supported in the current configuration.");
+        return new FailoverStatus().withStatusCode(400).withMessage("Failover not supported in current configuration.");
+    }
+
+    /**
+     * Implementation of {@code resolveSites()}. Calls
+     * {@code resolveSiteForMember()} to resolve which site a member belongs to.
+     *
+     * @see HealthResolver
+     */
+    @Override
+    public void resolveSites() {
+        log.info("Map contains {} entries", memberMap.size());
+        memberMap.forEach((key, value) -> resolveSiteForMember(value));
+    }
+
+    /**
+     * Resolves which site a member belongs to. Since this is a single node
+     * architecture, it is defaulted to <i>Site 1</i>.
+     *
+     * @see HealthResolver
+     */
+    private void resolveSiteForMember(ClusterActor actor) {
+        actor.setSite("Site 1");
+        log.info("resolveSiteForMember(): {} belongs to {}", actor.getNode(), actor.getSite());
+    }
+}
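A note for reviewers: the resolvers introduced in this patch share one shape. Each constructor hands the member map, properties, and DbLibService to HealthResolver and then calls resolveSites(), and each class overrides the health and failover hooks. The sketch below shows a minimal custom resolver following that pattern; the class name is hypothetical and the snippet is illustrative only, not part of the patch.

// Illustrative sketch only; not part of this patch. A skeletal custom resolver
// that mirrors the constructor and override pattern used by the bundled resolvers.
package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.onap.ccsdk.sli.core.dblib.DbLibService;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;

import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;

public class ExampleCustomHealthResolver extends HealthResolver {

    public ExampleCustomHealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
        super(map, properties, dbLib);
        resolveSites();
    }

    @Override
    public ClusterHealth getClusterHealth() {
        // Delegate member/shard checks to the shared ShardResolver, as the bundled resolvers do
        shardResolver.getControllerHealth(memberMap);
        return new ClusterHealth().withHealth(Health.HEALTHY);
    }

    @Override
    public List<SiteHealth> getSiteHealth() {
        // Report a single site; a real resolver would aggregate admin, database, and cluster health
        return Collections.singletonList(new SiteHealth().withId("Example Site").withRole("ACTIVE"));
    }

    @Override
    public FailoverStatus tryFailover(FailoverInput input) {
        return new FailoverStatus().withStatusCode(400).withMessage("Failover not supported in current configuration.");
    }

    @Override
    public void resolveSites() {
        memberMap.forEach((key, value) -> value.setSite("Site 1"));
    }
}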
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionManager;
+import org.onap.ccsdk.sli.plugins.grtoolkit.connection.ConnectionResponse;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
+/**
+ * Implementation of {@code HealthResolver} for a six node controller
+ * architecture, where three nodes are located in one data center, and the
+ * other three nodes are located in another. The sites are assumed to be in an
+ * Active/Standby configuration, with the Active site nodes voting and the
+ * Standby site nodes non-voting.
+ *
+ * @author Anthony Haddox
+ * @see HealthResolver
+ */
+public class SixNodeHealthResolver extends HealthResolver {
+    private final Logger log = LoggerFactory.getLogger(SixNodeHealthResolver.class);
+
+    /**
+     * Constructs the health resolver used by the {@code GrToolkitProvider} to
+     * determine the health of the application components.
+     *
+     * @param map a HashMap containing all of the nodes in the akka cluster
+     * @param properties the properties passed into the provider
+     * @param dbLib a reference to the {@code DbLibService} of the provider
+     * @see HealthResolver
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     */
+    public SixNodeHealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
+        super(map, properties, dbLib);
+        resolveSites();
+    }
+
+    /**
+     * Implementation of {@code getClusterHealth()}. Uses the
+     * {@code ShardResolver} to gather health information about the controller.
+     * If 4 of 6 members are healthy, the cluster is deemed healthy.
+     *
+     * @return a {@code ClusterHealth} object with the health of the akka cluster
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see ClusterHealth
+     * @see ShardResolver
+     */
+    @Override
+    public ClusterHealth getClusterHealth() {
+        log.info("getClusterHealth(): Getting cluster health...");
+        shardResolver.getControllerHealth(memberMap);
+        long healthyMembers = memberMap.values().stream().filter(member -> member.isUp() && !member.isUnreachable()).count();
+        // A quorum of 4 of the 6 members must be up and reachable for the cluster to be considered healthy
+        return (healthyMembers > 3) ? new ClusterHealth().withHealth(Health.HEALTHY) : new ClusterHealth().withHealth(Health.FAULTY);
+    }
+
+    /**
+     * Implementation of {@code getSiteHealth()}. Gathers health information on
+     * all of the controllers, then separates the nodes into voting and
+     * non-voting sites. Each site is then checked for its health and the
+     * result is returned as a List.
+     *
+     * @return a List of {@code SiteHealth} objects with the health of each site
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see SiteHealth
+     * @see ShardResolver
+     */
+    @Override
+    public List<SiteHealth> getSiteHealth() {
+        log.info("getSiteHealth(): Getting site health...");
+
+        // Get cluster health to populate memberMap with necessary values
+        getClusterHealth();
+        List<ClusterActor> votingActors = memberMap.values().stream().filter(ClusterActor::isVoting).collect(Collectors.toList());
+        List<ClusterActor> nonVotingActors = memberMap.values().stream().filter(member -> !member.isVoting()).collect(Collectors.toList());
+
+        SiteHealth votingSiteHealth = getSiteHealth(votingActors).withRole("ACTIVE");
+        SiteHealth nonVotingSiteHealth = getSiteHealth(nonVotingActors).withRole("STANDBY");
+        return Arrays.asList(votingSiteHealth, nonVotingSiteHealth);
+    }
+
+    /**
+     * Gathers the site identifier, admin health, and database health of a
+     * site.
+     *
+     * @param actorList the {@code ClusterActor}s belonging to the site being checked
+     * @return a {@code SiteHealth} object with the health of the site
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see ClusterActor
+     * @see SiteHealth
+     * @see ConnectionManager
+     */
+    public SiteHealth getSiteHealth(List<ClusterActor> actorList) {
+        AdminHealth adminHealth = null;
+        DatabaseHealth databaseHealth = null;
+        String siteId = null;
+        int healthyMembers = 0;
+
+        for(ClusterActor actor : actorList) {
+            if(actor.isUp() && !actor.isUnreachable()) {
+                healthyMembers++;
+            }
+            if(siteId == null) {
+                try {
+                    String content = ConnectionManager.getConnectionResponse(httpProtocol + actor.getNode() + ":" + controllerPort + "/restconf/operations/gr-toolkit:site-identifier", ConnectionManager.HttpMethod.POST, null, credentials).content;
+                    siteId = new JSONObject(content).getJSONObject(OUTPUT).getString("id");
+                } catch(IOException e) {
+                    log.error("getSiteHealth(): Error getting site identifier from {}", actor.getNode());
+                    log.error("getSiteHealth(): IOException", e);
+                }
+            }
+            if(adminHealth == null) {
+                try {
+                    boolean isAdminHealthy = isRemoteComponentHealthy(httpProtocol + actor.getNode() + ":" + controllerPort + "/restconf/operations/gr-toolkit:admin-health");
+                    if(isAdminHealthy) {
+                        adminHealth = new AdminHealth(Health.HEALTHY, 200);
+                    }
+                } catch(IOException e) {
+                    log.error("getSiteHealth(): Error getting admin health from {}", actor.getNode());
+                    log.error("getSiteHealth(): IOException", e);
+                }
+            }
+            if(databaseHealth == null) {
+                try {
+                    boolean isDatabaseHealthy = isRemoteComponentHealthy(httpProtocol + actor.getNode() + ":" + controllerPort + "/restconf/operations/gr-toolkit:database-health");
+                    if(isDatabaseHealthy) {
+                        databaseHealth = new DatabaseHealth(Health.HEALTHY);
+                    }
+                } catch(IOException e) {
+                    log.error("getSiteHealth(): Error getting database health from {}", actor.getNode());
+                    log.error("getSiteHealth(): IOException", e);
+                }
+            }
+        }
+
+        if(siteId == null) {
+            siteId = "UNKNOWN SITE";
+        }
+        if(adminHealth == null) {
+            adminHealth = new AdminHealth(Health.FAULTY, 500);
+        }
+        if(databaseHealth == null) {
+            databaseHealth = new DatabaseHealth(Health.FAULTY);
+        }
+        SiteHealth health = new SiteHealth()
+                                    .withAdminHealth(adminHealth)
+                                    .withDatabaseHealth(databaseHealth)
+                                    .withId(siteId);
+        if(isHealthy(adminHealth.getHealth()) && isHealthy(databaseHealth.getHealth()) && healthyMembers > 1) {
+            health.setHealth(Health.HEALTHY);
+        }
+
+        return health;
+    }
+
+    /**
+     * Implementation of {@code tryFailover()}. Performs a preliminary call to
+     * {@code getClusterHealth} to populate information about the cluster. If
+     * no voting members can be found, the method terminates immediately. The
+     * nodes are separated into voting and non-voting sites, and a driving
+     * operator is selected from the non-voting nodes to perform requests
+     * against. A payload to swap voting between sites is sent to the operator
+     * to perform a controller-level failover.
+     *
+     * @return a {@code FailoverStatus} object with the result of the failover attempt
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see FailoverStatus
+     * @see FailoverInput
+     */
+    @Override
+    public FailoverStatus tryFailover(FailoverInput input) {
+        // Get Cluster Health to populate the memberMap with the necessary values
+        log.info("tryFailover(): Performing preliminary health check...");
+        getClusterHealth();
+        FailoverStatus status = new FailoverStatus();
+        ConnectionResponse votingResponse = null;
+        List<ClusterActor> votingActors = memberMap.values().stream().filter(ClusterActor::isVoting).collect(Collectors.toList());
+        List<ClusterActor> nonVotingActors = memberMap.values().stream().filter(member -> !member.isVoting()).collect(Collectors.toList());
+
+        if(nonVotingActors.isEmpty()) {
+            status.setStatusCode(500);
+            status.setMessage("No nonvoting members found. Cannot perform voting switch.");
+            return status;
+        }
+
+        ClusterActor operator;
+        try {
+            operator = nonVotingActors.stream().filter(this::isControllerHealthy).findFirst().get();
+        } catch(NoSuchElementException e) {
+            log.error("tryFailover(): Could not find any healthy members.", e);
+            status.setStatusCode(500);
+            status.setMessage("Could not find any healthy members.");
+            return status;
+        }
+
+        // Assuming two 3-node sites: 3 voting and 3 non-voting
+        if(votingActors.size() < 3 || nonVotingActors.size() < 3) {
+            log.warn("tryFailover(): Sites do not contain an equal amount of voting and nonvoting members: Voting: {} | NonVoting: {}", votingActors.size(), nonVotingActors.size());
+        }
+        log.info("tryFailover(): Swapping voting...");
+        try {
+            JSONObject votingInput = new JSONObject();
+            JSONObject inputBlock = new JSONObject();
+            JSONArray votingStateArray = new JSONArray();
+            JSONObject memberVotingState;
+            for(ClusterActor actor : votingActors) {
+                memberVotingState = new JSONObject();
+                memberVotingState.put("member-name", actor.getMember());
+                memberVotingState.put("voting", false);
+                votingStateArray.put(memberVotingState);
+            }
+            for(ClusterActor actor : nonVotingActors) {
+                memberVotingState = new JSONObject();
+                memberVotingState.put("member-name", actor.getMember());
+                memberVotingState.put("voting", true);
+                votingStateArray.put(memberVotingState);
+            }
+            inputBlock.put("member-voting-state", votingStateArray);
+            votingInput.put("input", inputBlock);
+            log.debug("tryFailover(): {}", votingInput);
+            // Change voting all shards
+            votingResponse = ConnectionManager.getConnectionResponse(httpProtocol + operator.getNode() + ":" + controllerPort + "/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards", ConnectionManager.HttpMethod.POST, votingInput.toString(), credentials);
+        } catch(IOException e) {
+            log.error("tryFailover(): Failure changing voting", e);
+        }
+        if(votingResponse != null) {
+            if(votingResponse.statusCode != 200) {
+                status.setStatusCode(votingResponse.statusCode);
+                status.setMessage("Failed to swap voting.");
+            } else {
+                status.setStatusCode(200);
+                status.setMessage("Failover complete.");
+            }
+        } else {
+            status.setStatusCode(500);
+            status.setMessage("Failed to swap voting.");
+        }
+
+        return status;
+    }
+
+    /**
+     * Implementation of {@code resolveSites()}. Calls
+     * {@code resolveSiteForMember()} to resolve which site a member belongs to.
+     *
+     * @see HealthResolver
+     */
+    @Override
+    public void resolveSites() {
+        log.info("Map contains {} entries", memberMap.size());
+        memberMap.forEach((key, value) -> resolveSiteForMember(value));
+    }
+
+    /**
+     * Resolves which site a member belongs to. Members 1-3 are assumed to be
+     * <i>Site 1</i> while members 4-6 are assumed to be <i>Site 2</i>.
+     *
+     * @see HealthResolver
+     */
+    private void resolveSiteForMember(ClusterActor actor) {
+        try {
+            int memberNumber = Integer.parseInt(actor.getMember().split("-")[1]);
+            if(memberNumber < 4) {
+                actor.setSite("Site 1");
+            } else {
+                actor.setSite("Site 2");
+            }
+            log.info("resolveSiteForMember(): {} belongs to {}", actor.getNode(), actor.getSite());
+        } catch(NumberFormatException e) {
+            log.error("resolveSiteForMember(): Could not parse member number for {}. Defaulting to Site 1.", actor.getNode());
+            actor.setSite("Site 1");
+        }
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverInput;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * Implementation of {@code HealthResolver} for a three node controller
+ * architecture, where all nodes are located within the same data center or
+ * geographic region. The nodes are assumed to be in an Active/Active/Active
+ * voting configuration.
+ *
+ * @author Anthony Haddox
+ * @see HealthResolver
+ */
+public class ThreeNodeHealthResolver extends HealthResolver {
+    private final Logger log = LoggerFactory.getLogger(ThreeNodeHealthResolver.class);
+
+    /**
+     * Constructs the health resolver used by the {@code GrToolkitProvider} to
+     * determine the health of the application components.
+     *
+     * @param map a HashMap containing all of the nodes in the akka cluster
+     * @param properties the properties passed into the provider
+     * @param dbLib a reference to the {@code DbLibService} of the provider
+     * @see HealthResolver
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     */
+    public ThreeNodeHealthResolver(Map<String, ClusterActor> map, Properties properties, DbLibService dbLib) {
+        super(map, properties, dbLib);
+        resolveSites();
+    }
+
+    /**
+     * Implementation of {@code getClusterHealth()}. Uses the
+     * {@code ShardResolver} to gather health information about the controller.
+     * If 2 of 3 members are healthy, the cluster is deemed healthy.
+     *
+     * @return a {@code ClusterHealth} object with the health of the akka cluster
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see ClusterHealth
+     * @see ShardResolver
+     */
+    @Override
+    public ClusterHealth getClusterHealth() {
+        log.info("getClusterHealth(): Getting cluster health...");
+        shardResolver.getControllerHealth(memberMap);
+        long healthyMembers = memberMap.values().stream().filter(member -> member.isUp() && !member.isUnreachable()).count();
+        return (healthyMembers > 1) ? new ClusterHealth().withHealth(Health.HEALTHY) : new ClusterHealth().withHealth(Health.FAULTY);
+    }
+
+    /**
+     * Implementation of {@code getSiteHealth()}. Uses the results from
+     * {@code getAdminHealth}, {@code getDatabaseHealth}, and
+     * {@code getClusterHealth} to determine the health of the site. If all
+     * components are healthy, the site is healthy.
+     *
+     * @return a List of {@code SiteHealth} objects with the health of the site
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see SiteHealth
+     * @see ShardResolver
+     */
+    @Override
+    public List<SiteHealth> getSiteHealth() {
+        log.info("getSiteHealth(): Getting site health...");
+        AdminHealth adminHealth = getAdminHealth();
+        DatabaseHealth databaseHealth = getDatabaseHealth();
+        ClusterHealth clusterHealth = getClusterHealth();
+        SiteHealth siteHealth = new SiteHealth()
+                                        .withAdminHealth(adminHealth)
+                                        .withDatabaseHealth(databaseHealth)
+                                        .withClusterHealth(clusterHealth)
+                                        .withRole("ACTIVE")
+                                        .withId(getSiteIdentifier());
+        log.info("getSiteHealth(): Admin Health: {}", adminHealth.getHealth().toString());
+        log.info("getSiteHealth(): Database Health: {}", databaseHealth.getHealth().toString());
+        log.info("getSiteHealth(): Cluster Health: {}", clusterHealth.getHealth().toString());
+        if(isHealthy(adminHealth.getHealth()) && isHealthy(databaseHealth.getHealth()) && isHealthy(clusterHealth.getHealth())) {
+            siteHealth.setHealth(Health.HEALTHY);
+        }
+
+        return Collections.singletonList(siteHealth);
+    }
+
+    /**
+     * Implementation of {@code tryFailover()}. No controller-level failover
+     * options are available in a three node architecture, so 400 Bad Request
+     * is returned, and no action is taken.
+     *
+     * @return a {@code FailoverStatus} object indicating failover is not supported
+     * @see org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider
+     * @see HealthResolver
+     * @see FailoverStatus
+     * @see FailoverInput
+     */
+    @Override
+    public FailoverStatus tryFailover(FailoverInput input) {
+        log.info("tryFailover(): Failover not supported in the current configuration.");
+        return new FailoverStatus().withStatusCode(400).withMessage("Failover not supported in current configuration.");
+    }
+
+    /**
+     * Implementation of {@code resolveSites()}. Calls
+     * {@code resolveSiteForMember()} to resolve which site a member belongs to.
+     *
+     * @see HealthResolver
+     */
+    @Override
+    public void resolveSites() {
+        log.info("Map contains {} entries", memberMap.size());
+        memberMap.forEach((key, value) -> resolveSiteForMember(value));
+    }
+
+    /**
+     * Resolves which site a member belongs to. Since this is a three node
+     * co-located architecture, it is defaulted to <i>Site 1</i>.
+     *
+     * @see HealthResolver
+     */
+    private void resolveSiteForMember(ClusterActor actor) {
+        actor.setSite("Site 1");
+        log.info("resolveSiteForMember(): {} belongs to {}", actor.getNode(), actor.getSite());
+    }
+}
 
+++ /dev/null
-<?xml version="1.0" encoding="UTF-8"?>
-<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0"
-           xmlns:odl="http://opendaylight.org/xmlns/blueprint/v1.0.0"
-           odl:use-default-for-reference-types="true">
-
-    <reference id="dataBroker"
-               interface="org.opendaylight.controller.md.sal.binding.api.DataBroker"
-               odl:type="default" />
-
-    <reference id="notificationService"
-               interface="org.opendaylight.controller.md.sal.binding.api.NotificationPublishService"
-               odl:type="default" />
-
-    <reference id="rpcRegistry"
-               interface="org.opendaylight.controller.sal.binding.api.RpcProviderRegistry"
-               odl:type="default" />
-
-    <reference id="dbLib"
-               interface="org.onap.ccsdk.sli.core.dblib.DbLibService" />
-
-    <reference id="configDatastore" interface="org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface"
-               odl:type="distributed-config"/>
-
-    <bean id="provider" class="org.onap.ccsdk.sli.plugins.grtoolkit.GrToolkitProvider">
-        <argument ref="dataBroker" />
-        <argument ref="notificationService" />
-        <argument ref="rpcRegistry" />
-        <argument ref="dbLib" />
-        <argument ref="configDatastore" />
-    </bean>
-
-    <odl:rpc-implementation ref="provider"/>
-</blueprint>
 
 # limitations under the License.
 # ============LICENSE_END=========================================================
 
+resolver=org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SingleNodeHealthResolver
 akka.conf.location=/opt/opendaylight/current/controller/configuration/initial/akka.conf
 adm.useSsl=true
 adm.fqdn=
 mbean.cluster=/jolokia/read/akka:type=Cluster
 mbean.shardManager=/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore
 mbean.shard.config=/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore
-site.identifier=UniqueSiteNamehere
+site.identifier=UniqueSiteNameHere
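The new resolver property names the HealthResolver implementation GR Toolkit should load. The sketch below is one hedged illustration of how such a property could be consumed reflectively, assuming the (Map, Properties, DbLibService) constructor shared by the bundled resolvers; the actual loading logic belongs to GrToolkitProvider and may differ, and the class shown here is hypothetical.

// Illustrative sketch only; not part of this patch. ResolverLoaderSketch is hypothetical
// and assumes the class named by the "resolver" property exposes the
// (Map<String, ClusterActor>, Properties, DbLibService) constructor used by the bundled resolvers.
package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;

import java.lang.reflect.Constructor;
import java.util.Map;
import java.util.Properties;

import org.onap.ccsdk.sli.core.dblib.DbLibService;
import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;

public class ResolverLoaderSketch {

    public static HealthResolver load(Properties properties, Map<String, ClusterActor> memberMap, DbLibService dbLibService) {
        String resolverName = properties.getProperty("resolver",
                "org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SingleNodeHealthResolver");
        try {
            Class<?> resolverClass = Class.forName(resolverName);
            Constructor<?> constructor = resolverClass.getConstructor(Map.class, Properties.class, DbLibService.class);
            return (HealthResolver) constructor.newInstance(memberMap, properties, dbLibService);
        } catch(ReflectiveOperationException e) {
            // Fall back to the single node resolver if the configured class cannot be loaded
            return new SingleNodeHealthResolver(memberMap, properties, dbLibService);
        }
    }
}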
 
  */
 
 package org.onap.ccsdk.sli.plugins.grtoolkit;
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
 import com.google.common.util.concurrent.ListenableFuture;
+
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.contrib.java.lang.system.EnvironmentVariables;
+
 import org.onap.ccsdk.sli.core.dblib.DBLibConnection;
 import org.onap.ccsdk.sli.core.dblib.DbLibService;
 import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+
 import org.opendaylight.controller.cluster.access.concepts.MemberName;
 import org.opendaylight.controller.cluster.datastore.DistributedDataStoreInterface;
 import org.opendaylight.controller.cluster.datastore.utils.ActorContext;
 import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ClusterHealthOutput;
 import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.DatabaseHealthOutput;
 import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverOutput;
-import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.FailoverOutputBuilder;
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.HaltAkkaTrafficInputBuilder;
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.HaltAkkaTrafficOutput;
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ResumeAkkaTrafficInputBuilder;
+import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.ResumeAkkaTrafficOutput;
 import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteHealthOutput;
 import org.opendaylight.yang.gen.v1.org.onap.ccsdk.sli.plugins.gr.toolkit.rev180926.SiteIdentifierOutput;
 import org.opendaylight.yangtools.yang.common.RpcResult;
 
-import java.lang.reflect.Field;
+import java.io.IOException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.sql.SQLException;
 import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
+
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
     DistributedDataStoreInterface configDatastore;
     DbLibService dbLibService;
     DBLibConnection connection;
+    Properties properties;
 
     @Rule
     public final EnvironmentVariables environmentVariables = new EnvironmentVariables();
+    @Rule
+    public WireMockRule wireMockRule = new WireMockRule(9999);
 
     @Before
     public void setup() {
-        environmentVariables.set("SDNC_CONFIG_DIR","src/test/resources/");
+        environmentVariables.set("SDNC_CONFIG_DIR","src/test/resources");
         dataBroker = mock(DataBroker.class);
         notificationProviderService = mock(NotificationPublishService.class);
         rpcProviderRegistry = mock(RpcProviderRegistry.class);
         provider = new GrToolkitProvider(dataBroker, notificationProviderService,
                 rpcProviderRegistry, configDatastore, dbLibService);
         providerSpy = spy(provider);
+        stubController();
     }
 
     @Test
     public void closeTest() {
         try {
             provider.close();
-        }
-        catch(Exception e) {
+        } catch(Exception e) {
             // Exception expected
         }
     }
         // onDataTreeChanged is an empty stub
     }
 
-    @Test
-    public void clusterHealthTest() {
-        ListenableFuture<RpcResult<ClusterHealthOutput>> result = provider.clusterHealth(null);
-        try {
-            assertEquals("200", result.get().getResult().getStatus());
-        } catch(InterruptedException | ExecutionException e) {
+    private void stubController() {
+        String clusterBody = null;
+        String shardManagerBody = null;
+        String shardDefaultBody = null;
+        String shardOperationalBody = null;
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/cluster.json"))) {
+            clusterBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/shard-manager.json"))) {
+            shardManagerBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/default-config.json"))) {
+            shardDefaultBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/default-operational.json"))) {
+            shardOperationalBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
             fail();
         }
-    }
 
-    @Test
-    public void siteHealthTest() {
-        ListenableFuture<RpcResult<SiteHealthOutput>> result = provider.siteHealth(null);
-        try {
-            assertEquals("200", result.get().getResult().getStatus());
-        } catch(InterruptedException | ExecutionException e) {
+        if(clusterBody == null || shardManagerBody == null || shardDefaultBody == null || shardOperationalBody == null) {
             fail();
         }
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBody(clusterBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardManagerBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardDefaultBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore")).willReturn(aResponse().withStatus(200).withBody(shardOperationalBody)));
     }
 
     @Test
-    public void siteHealth6NodeTest() {
-        Map<String, ClusterActor> memberMap = new HashMap<>();
-        ClusterActor actor;
-        for(int ndx = 0; ndx < 6; ndx++) {
-            actor = new ClusterActor();
-            actor.setNode("member-" + (ndx + 1));
-            actor.setUp(true);
-            actor.setUnreachable(false);
-
-            memberMap.put(actor.getNode(),  actor);
-        }
-
+    public void clusterHealthTest() {
+        ListenableFuture<RpcResult<ClusterHealthOutput>> result = provider.clusterHealth(null);
         try {
-            Field field = provider.getClass().getDeclaredField("siteConfiguration");
-            field.setAccessible(true);
-            field.set(provider, GrToolkitProvider.SiteConfiguration.GEO);
-
-            field = provider.getClass().getDeclaredField("memberMap");
-            field.setAccessible(true);
-            field.set(provider, memberMap);
-
-
-            actor = new ClusterActor();
-            actor.setNode("member-1");
-            field = provider.getClass().getDeclaredField("self");
-            field.setAccessible(true);
-            field.set(provider, actor);
-
-            field = provider.getClass().getDeclaredField("member");
-            field.setAccessible(true);
-            field.set(provider, actor.getNode());
-        }
-        catch(IllegalAccessException | NoSuchFieldException e) {
+            assertEquals("0", result.get().getResult().getStatus());
+        } catch(InterruptedException | ExecutionException e) {
             fail();
         }
+    }
 
+    @Test
+    public void siteHealthTest() {
         ListenableFuture<RpcResult<SiteHealthOutput>> result = provider.siteHealth(null);
         try {
             assertEquals("200", result.get().getResult().getStatus());
         }
         ListenableFuture<RpcResult<DatabaseHealthOutput>> result = provider.databaseHealth(null);
         try {
-            assertEquals("200", result.get().getResult().getStatus());
+            assertEquals("500", result.get().getResult().getStatus());
         } catch(InterruptedException | ExecutionException e) {
             fail();
         }
         }
         ListenableFuture<RpcResult<DatabaseHealthOutput>> result = provider.databaseHealth(null);
         try {
-            assertEquals("200", result.get().getResult().getStatus());
+            assertEquals("500", result.get().getResult().getStatus());
         } catch(InterruptedException | ExecutionException e) {
             fail();
         }
         }
     }
 
+    @Test
+    public void haltTrafficTest() {
+        HaltAkkaTrafficInputBuilder builder = new HaltAkkaTrafficInputBuilder();
+        builder.setNodeInfo(new ArrayList<>());
+        ListenableFuture<RpcResult<HaltAkkaTrafficOutput>> result = provider.haltAkkaTraffic(builder.build());
+        try {
+            assertEquals("200", result.get().getResult().getStatus());
+        } catch(InterruptedException | ExecutionException e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void resumeTrafficTest() {
+        ResumeAkkaTrafficInputBuilder builder = new ResumeAkkaTrafficInputBuilder();
+        builder.setNodeInfo(new ArrayList<>());
+        ListenableFuture<RpcResult<ResumeAkkaTrafficOutput>> result = provider.resumeAkkaTraffic(builder.build());
+        try {
+            assertEquals("200", result.get().getResult().getStatus());
+        } catch(InterruptedException | ExecutionException e) {
+            fail();
+        }
+    }
+
     @Test
     public void executeCommandTest() {
         try {
             Method method = provider.getClass().getDeclaredMethod("executeCommand", String.class);
             method.setAccessible(true);
             method.invoke(provider, "ls");
-        }
-        catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
+        } catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
             fail();
         }
     }
             Method method = provider.getClass().getDeclaredMethod("isolateSiteFromCluster", ArrayList.class, ArrayList.class, String.class);
             method.setAccessible(true);
             method.invoke(provider, activeList, standbyList, "80");
-        }
-        catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
+        } catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
             fail();
         }
     }
             Method method = provider.getClass().getDeclaredMethod("downUnreachableNodes", ArrayList.class, ArrayList.class, String.class);
             method.setAccessible(true);
             method.invoke(provider, activeList, standbyList, "80");
-        }
-        catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
-            fail();
-        }
-    }
-
-    @Test
-    public void changeClusterVotingTest() {
-        try {
-            ClusterActor actor = new ClusterActor();
-            actor.setMember("some-member");
-            actor.setNode("some-Node");
-            ArrayList<ClusterActor> activeList = new ArrayList<>();
-            activeList.add(actor);
-            ArrayList<ClusterActor> standbyList = new ArrayList<>();
-            standbyList.add(actor);
-            Field field = provider.getClass().getDeclaredField("self");
-            field.setAccessible(true);
-            field.set(provider, actor);
-            Method method = provider.getClass().getDeclaredMethod("changeClusterVoting", FailoverOutputBuilder.class, ArrayList.class, ArrayList.class, String.class);
-            method.setAccessible(true);
-            method.invoke(provider, new FailoverOutputBuilder(), activeList, standbyList, "80");
-        }
-        catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException | NoSuchFieldException e) {
+        } catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
             fail();
         }
     }
             Method method = provider.getClass().getDeclaredMethod("backupMdSal", ArrayList.class, String.class);
             method.setAccessible(true);
             method.invoke(provider, activeList, "80");
-        }
-        catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
+        } catch(NoSuchMethodException | IllegalAccessException | InvocationTargetException e) {
             fail();
         }
     }
-
 }
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.connection;
+
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
+import org.junit.Rule;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.*;
+
+public class ConnectionManagerTest {
+    @Rule
+    public WireMockRule wireMockRule = new WireMockRule(9999);
+
+    @Test
+    public void getConnectionResponseWithInput() throws Exception {
+        stubFor(post(urlEqualTo("/post"))
+                        .willReturn(aResponse().withStatus(200)));
+        ConnectionResponse response = ConnectionManager.getConnectionResponse("http://localhost:9999/post", ConnectionManager.HttpMethod.POST, "", "creds:creds");
+        assertNotNull(response);
+        assertEquals(200, response.statusCode);
+    }
+
+    @Test
+    public void getConnectionResponseWithCredentials() throws Exception {
+        stubFor(post(urlEqualTo("/post"))
+                        .willReturn(aResponse().withStatus(200)));
+        ConnectionResponse response = ConnectionManager.getConnectionResponse("http://localhost:9999/post", ConnectionManager.HttpMethod.POST, "", "creds:creds");
+        assertNotNull(response);
+        assertEquals(200, response.statusCode);
+    }
+
+    @Test
+    public void getConnectionResponse() throws Exception {
+        stubFor(get(urlEqualTo("/get"))
+                        .willReturn(aResponse().withStatus(200)
+                        .withBody("Multi\nLine\nResponse")));
+        ConnectionResponse response = ConnectionManager.getConnectionResponse("http://localhost:9999/get", ConnectionManager.HttpMethod.GET, null, null);
+        assertNotNull(response);
+        assertEquals(200, response.statusCode);
+        assertEquals("MultiLineResponse", response.content);
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.connection;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class ConnectionResponseTest {
+    @Test
+    public void constructorTest() {
+        ConnectionResponse response = new ConnectionResponse();
+        assertNotNull(response);
+        assertEquals(0, response.statusCode);
+        assertNull(response.content);
+        assertTrue(response.toString().length() > 0);
+    }
+    @Test
+    public void withStatusCode() {
+        ConnectionResponse response = new ConnectionResponse().withStatusCode(123);
+        assertEquals(123, response.statusCode);
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class AdminHealthTest {
+    @Test
+    public void constructorTest() {
+        AdminHealth health = new AdminHealth(Health.HEALTHY);
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void constructor2Test() {
+        AdminHealth health = new AdminHealth(Health.FAULTY, 500);
+        assertEquals(Health.FAULTY, health.getHealth());
+        assertEquals(500, health.getStatusCode());
+    }
+
+    @Test
+    public void setHealth() {
+        AdminHealth health = new AdminHealth(Health.HEALTHY, 201);
+        assertEquals(Health.HEALTHY, health.getHealth());
+        assertEquals(201, health.getStatusCode());
+        health.setHealth(Health.FAULTY);
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void setStatusCode() {
+        AdminHealth health = new AdminHealth(Health.HEALTHY, 200);
+        assertEquals(Health.HEALTHY, health.getHealth());
+        assertEquals(200, health.getStatusCode());
+        health.setStatusCode(409);
+        assertEquals(409, health.getStatusCode());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class ClusterHealthTest {
+    @Test
+    public void constructorTest() {
+        ClusterHealth health = new ClusterHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void withHealth() {
+        ClusterHealth health = new ClusterHealth().withHealth(Health.HEALTHY);
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void setHealth() {
+        ClusterHealth health = new ClusterHealth();
+        health.setHealth(Health.HEALTHY);
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class DatabaseHealthTest {
+    @Test
+    public void constructorTest() {
+        DatabaseHealth health = new DatabaseHealth(Health.FAULTY);
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void setHealth() {
+        DatabaseHealth health = new DatabaseHealth(Health.FAULTY);
+        assertEquals(Health.FAULTY, health.getHealth());
+        health.setHealth(Health.HEALTHY);
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class FailoverStatusTest {
+    @Test
+    public void constructorTest() {
+        FailoverStatus status = new FailoverStatus();
+        assertEquals(200, status.getStatusCode());
+        assertEquals("Failover complete.", status.getMessage());
+    }
+    @Test
+    public void withStatusCode() {
+        FailoverStatus status = new FailoverStatus().withStatusCode(500);
+        assertEquals(500, status.getStatusCode());
+    }
+
+    @Test
+    public void withMessage() {
+        FailoverStatus status = new FailoverStatus().withMessage("Test");
+        assertEquals("Test", status.getMessage());
+    }
+
+    @Test
+    public void setStatusCode() {
+        FailoverStatus status = new FailoverStatus();
+        status.setStatusCode(500);
+        assertEquals(500, status.getStatusCode());
+    }
+
+    @Test
+    public void setMessage() {
+        FailoverStatus status = new FailoverStatus();
+        status.setMessage("Test");
+        assertEquals("Test", status.getMessage());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class HealthTest {
+    @Test
+    public void getHealth() {
+        assertEquals("HEALTHY", Health.HEALTHY.getHealth());
+        assertEquals("FAULTY", Health.FAULTY.getHealth());
+    }
+}
\ No newline at end of file
 
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2018 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
 package org.onap.ccsdk.sli.plugins.grtoolkit.data;
 
 import org.junit.Before;
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.data;
+
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+public class SiteHealthTest {
+    @Test
+    public void constructorTest() {
+        SiteHealth health = new SiteHealth();
+        assertNotNull(health.getAdminHealth());
+        assertNotNull(health.getDatabaseHealth());
+        assertNotNull(health.getClusterHealth());
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+    @Test
+    public void withAdminHealth() {
+        SiteHealth health = new SiteHealth().withAdminHealth(new AdminHealth(Health.HEALTHY));
+        assertEquals(Health.HEALTHY, health.getAdminHealth().get(0).getHealth());
+    }
+
+    @Test
+    public void withDatabaseHealth() {
+        SiteHealth health = new SiteHealth().withDatabaseHealth(new DatabaseHealth(Health.HEALTHY));
+        assertEquals(Health.HEALTHY, health.getDatabaseHealth().get(0).getHealth());
+    }
+
+    @Test
+    public void withClusterHealth() {
+        SiteHealth health = new SiteHealth().withClusterHealth(new ClusterHealth());
+        assertEquals(Health.FAULTY, health.getClusterHealth().get(0).getHealth());
+    }
+
+    @Test
+    public void withId() {
+        SiteHealth health = new SiteHealth().withId("My_ID");
+        assertEquals("My_ID", health.getId());
+    }
+
+    @Test
+    public void withRole() {
+        SiteHealth health = new SiteHealth().withRole("My_role");
+        assertEquals("My_role", health.getRole());
+    }
+
+    @Test
+    public void setHealth() {
+        SiteHealth health = new SiteHealth();
+        health.setHealth(Health.HEALTHY);
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void setAdminHealth() {
+        SiteHealth health = new SiteHealth().withAdminHealth(new AdminHealth(Health.HEALTHY));
+        health.setAdminHealth(null);
+        assertNull(health.getAdminHealth());
+    }
+
+    @Test
+    public void setDatabaseHealth() {
+        SiteHealth health = new SiteHealth().withDatabaseHealth(new DatabaseHealth(Health.HEALTHY));
+        health.setDatabaseHealth(null);
+        assertNull(health.getDatabaseHealth());
+    }
+
+    @Test
+    public void setClusterHealth() {
+        SiteHealth health = new SiteHealth().withClusterHealth(new ClusterHealth());
+        health.setClusterHealth(null);
+        assertNull(health.getClusterHealth());
+    }
+
+    @Test
+    public void setId() {
+        SiteHealth health = new SiteHealth().withId("My_ID");
+        health.setId("My_new_ID");
+        assertEquals("My_new_ID", health.getId());
+    }
+
+    @Test
+    public void setRole() {
+        SiteHealth health = new SiteHealth().withRole("My_role");
+        health.setRole("My_new_role");
+        assertEquals("My_new_role", health.getRole());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.onap.ccsdk.sli.core.dblib.DBLibConnection;
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+
+import static org.junit.Assert.*;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
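+/**
+ * Tests for {@link SingleNodeHealthResolver}. WireMock stands in for the ODL
+ * controller (Jolokia MBean reads) and the admin portal health check, while
+ * Mockito mocks the {@code DbLibService} database layer.
+ */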
+public class SingleNodeHealthResolverTest {
+    private Map<String, ClusterActor> memberMap;
+    private DbLibService dbLibService;
+    private DBLibConnection connection;
+    private SingleNodeHealthResolver resolver;
+
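+    // Port 9999 matches adm.port.http and controller.port.http in
+    // src/test/resources/single/gr-toolkit.properties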
+    @Rule
+    public WireMockRule wireMockRule = new WireMockRule(9999);
+
+    @Before
+    public void setUp() {
+        memberMap = generateMemberMap(1);
+        Properties properties = new Properties();
+        try(FileInputStream fileInputStream = new FileInputStream("src/test/resources/single/gr-toolkit.properties")) {
+            properties.load(fileInputStream);
+        } catch(IOException e) {
+            fail();
+        }
+
+        dbLibService = mock(DbLibService.class);
+        connection = mock(DBLibConnection.class);
+        resolver = new SingleNodeHealthResolver(memberMap, properties, dbLibService);
+    }
+
+    private Map<String, ClusterActor> generateMemberMap(int memberCount) {
+        Map<String, ClusterActor> map = new HashMap<>();
+        ClusterActor actor;
+        for(int ndx = 0; ndx < memberCount; ndx++) {
+            actor = new ClusterActor();
+            actor.setNode("localhost");
+            actor.setAkkaPort("2550");
+            actor.setMember("member-" + (ndx + 1));
+            actor.setUp(true);
+            actor.setUnreachable(false);
+
+            map.put(actor.getNode(), actor);
+        }
+        return map;
+    }
+
+    @Test
+    public void getAdminHealthFaulty() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(500)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(500, health.getStatusCode());
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getAdminHealthHealthy() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(200, health.getStatusCode());
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealth() {
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthFaulty() {
+        try {
+            when(connection.isReadOnly()).thenReturn(true);
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthException() {
+        try {
+            when(connection.isReadOnly()).thenThrow(new SQLException());
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void siteIdentifier() {
+        assertEquals("TestODL", resolver.getSiteIdentifier());
+        resolver.setSiteIdentifier("NewTestODL");
+        assertEquals("NewTestODL", resolver.getSiteIdentifier());
+    }
+
+    @Test
+    public void getClusterHealth() {
+        stubController();
+        ClusterHealth health = resolver.getClusterHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    private void stubController() {
+        String clusterBody = null;
+        String shardManagerBody = null;
+        String shardDefaultBody = null;
+        String shardOperationalBody = null;
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/single/cluster.json"))) {
+            clusterBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/single/shard-manager.json"))) {
+            shardManagerBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/single/default-config.json"))) {
+            shardDefaultBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/single/default-operational.json"))) {
+            shardOperationalBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+
+        if(clusterBody == null || shardManagerBody == null || shardDefaultBody == null || shardOperationalBody == null) {
+            fail();
+        }
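+        // Serve the canned Jolokia responses the resolver reads for cluster state,
+        // the shard manager, and the default shard (config and operational) MBeans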
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBody(clusterBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).willReturn(aResponse().withStatus(200).withBody(shardManagerBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).willReturn(aResponse().withStatus(200).withBody(shardDefaultBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore")).willReturn(aResponse().withStatus(200).withBody(shardOperationalBody)));
+    }
+
+    @Test
+    public void getSiteHealth() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.HEALTHY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void tryFailover() {
+        FailoverStatus status = resolver.tryFailover(null);
+        assertEquals(400, status.getStatusCode());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.onap.ccsdk.sli.core.dblib.DBLibConnection;
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.post;
+import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+
+import static org.junit.Assert.*;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
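+/**
+ * Tests for {@link SixNodeHealthResolver} against a simulated six-member,
+ * two-site cluster. WireMock serves the Jolokia, admin portal, and GR Toolkit
+ * RPC responses; Mockito mocks the database layer.
+ */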
+public class SixNodeHealthResolverTest {
+    private Map<String, ClusterActor> memberMap;
+    private DbLibService dbLibService;
+    private DBLibConnection connection;
+    private SixNodeHealthResolver resolver;
+
+    @Rule
+    public WireMockRule wireMockRule = new WireMockRule(9999);
+
+    @Before
+    public void setUp() {
+        memberMap = generateMemberMap(6);
+        Properties properties = new Properties();
+        try(FileInputStream fileInputStream = new FileInputStream("src/test/resources/six/gr-toolkit.properties")) {
+            properties.load(fileInputStream);
+        } catch(IOException e) {
+            fail();
+        }
+
+        dbLibService = mock(DbLibService.class);
+        connection = mock(DBLibConnection.class);
+        resolver = new SixNodeHealthResolver(memberMap, properties, dbLibService);
+    }
+
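+    // Builds healthy, reachable members on 127.0.1.1 through 127.0.1.<memberCount>,
+    // keyed by node address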
+    private Map<String, ClusterActor> generateMemberMap(int memberCount) {
+        Map<String, ClusterActor> map = new HashMap<>();
+        ClusterActor actor;
+        for(int ndx = 0; ndx < memberCount; ndx++) {
+            actor = new ClusterActor();
+            actor.setNode("127.0.1." + (ndx + 1));
+            actor.setAkkaPort("2550");
+            actor.setMember("member-" + (ndx + 1));
+            actor.setUp(true);
+            actor.setUnreachable(false);
+
+            map.put(actor.getNode(), actor);
+        }
+        return map;
+    }
+
+    @Test
+    public void getAdminHealthFaulty() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(500)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(500, health.getStatusCode());
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getAdminHealthHealthy() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(200, health.getStatusCode());
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealth() {
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthFaulty() {
+        try {
+            when(connection.isReadOnly()).thenReturn(true);
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthException() {
+        try {
+            when(connection.isReadOnly()).thenThrow(new SQLException());
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void siteIdentifier() {
+        assertEquals("TestODL", resolver.getSiteIdentifier());
+        resolver.setSiteIdentifier("NewTestODL");
+        assertEquals("NewTestODL", resolver.getSiteIdentifier());
+    }
+
+    @Test
+    public void getClusterHealth() {
+        stubController();
+        ClusterHealth health = resolver.getClusterHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    private void stubController() {
+        String clusterBody = null;
+        String shardManagerBody = null;
+        String shardDefaultBody = null;
+        String shardOperationalBody = null;
+        String componentBody = null;
+        String identifierBody = null;
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/cluster.json"))) {
+            clusterBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/shard-manager.json"))) {
+            shardManagerBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/default-config.json"))) {
+            shardDefaultBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/default-operational.json"))) {
+            shardOperationalBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/component-health.json"))) {
+            componentBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/six/site-identifier.json"))) {
+            identifierBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+
+        if(clusterBody == null || shardManagerBody == null || shardDefaultBody == null || shardOperationalBody == null
+            || componentBody == null || identifierBody == null) {
+            fail();
+        }
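+        // The shard manager, default-config shard, and admin-health stubs are wired
+        // into the "testing" scenario so individual tests can override the follow-up
+        // ("next") state with a failing response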
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBody(clusterBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardManagerBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardDefaultBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore")).willReturn(aResponse().withStatus(200).withBody(shardOperationalBody)));
+        stubFor(post(urlEqualTo("/restconf/operations/gr-toolkit:site-identifier")).willReturn(aResponse().withStatus(200).withBody(identifierBody)));
+        stubFor(post(urlEqualTo("/restconf/operations/gr-toolkit:admin-health")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(componentBody)).willSetStateTo("next"));
+        stubFor(post(urlEqualTo("/restconf/operations/gr-toolkit:database-health")).willReturn(aResponse().withStatus(200).withBody(componentBody)));
+    }
+
+    @Test
+    public void getSiteHealth() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(2, health.size());
+        assertEquals(Health.HEALTHY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaulty() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).inScenario("testing").whenScenarioStateIs("next").willReturn(aResponse().withBodyFile("nonexistent")));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(2, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyShard() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withBodyFile("nonexistent")).willSetStateTo("next"));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(2, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyCluster() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBodyFile("nonexistent")));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(2, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyAdmin() {
+        stubController();
+        stubFor(post(urlEqualTo("/restconf/operations/gr-toolkit:admin-health")).inScenario("testing").willReturn(aResponse().withBodyFile("nonexistent")).willSetStateTo("next"));
+        stubFor(post(urlEqualTo("/restconf/operations/gr-toolkit:admin-health")).inScenario("testing").whenScenarioStateIs("next").willReturn(aResponse().withBodyFile("nonexistent")));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(2, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+        assertEquals(Health.FAULTY, health.get(1).getHealth());
+    }
+
+    @Test
+    public void tryFailover() {
+        stubController();
+        stubFor(get(urlEqualTo("/restconf/operations/cluster-admin:change-member-voting-states-for-all-shards")).willReturn(aResponse().withStatus(200)));
+        FailoverStatus status = resolver.tryFailover(null);
+        assertEquals(500, status.getStatusCode());
+    }
+}
\ No newline at end of file
 
--- /dev/null
+/*-
+ * ============LICENSE_START=======================================================
+ * openECOMP : SDN-C
+ * ================================================================================
+ * Copyright (C) 2019 AT&T Intellectual Property. All rights
+ *                     reserved.
+ * ================================================================================
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ============LICENSE_END=========================================================
+ */
+
+package org.onap.ccsdk.sli.plugins.grtoolkit.resolver;
+
+import com.github.tomakehurst.wiremock.junit.WireMockRule;
+
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+
+import org.onap.ccsdk.sli.core.dblib.DBLibConnection;
+import org.onap.ccsdk.sli.core.dblib.DbLibService;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.AdminHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterActor;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.ClusterHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.DatabaseHealth;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.FailoverStatus;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.Health;
+import org.onap.ccsdk.sli.plugins.grtoolkit.data.SiteHealth;
+
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.github.tomakehurst.wiremock.client.WireMock.aResponse;
+import static com.github.tomakehurst.wiremock.client.WireMock.get;
+import static com.github.tomakehurst.wiremock.client.WireMock.stubFor;
+import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo;
+
+import static org.junit.Assert.*;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
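+/**
+ * Tests for {@link ThreeNodeHealthResolver} against a simulated single-site,
+ * three-member cluster, mirroring the WireMock and Mockito setup used by the
+ * other resolver tests.
+ */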
+public class ThreeNodeHealthResolverTest {
+    private Map<String, ClusterActor> memberMap;
+    private DbLibService dbLibService;
+    private DBLibConnection connection;
+    private ThreeNodeHealthResolver resolver;
+
+    @Rule
+    public WireMockRule wireMockRule = new WireMockRule(9999);
+
+    @Before
+    public void setUp() {
+        memberMap = generateMemberMap(3);
+        Properties properties = new Properties();
+        try(FileInputStream fileInputStream = new FileInputStream("src/test/resources/three/gr-toolkit.properties")) {
+            properties.load(fileInputStream);
+        } catch(IOException e) {
+            fail();
+        }
+
+        dbLibService = mock(DbLibService.class);
+        connection = mock(DBLibConnection.class);
+        resolver = new ThreeNodeHealthResolver(memberMap, properties, dbLibService);
+    }
+
+    private Map<String, ClusterActor> generateMemberMap(int memberCount) {
+        Map<String, ClusterActor> map = new HashMap<>();
+        ClusterActor actor;
+        for(int ndx = 0; ndx < memberCount; ndx++) {
+            actor = new ClusterActor();
+            actor.setNode("127.0.1." + (ndx + 1));
+            actor.setAkkaPort("2550");
+            actor.setMember("member-" + (ndx + 1));
+            actor.setUp(true);
+            actor.setUnreachable(false);
+
+            map.put(actor.getNode(), actor);
+        }
+        return map;
+    }
+
+    @Test
+    public void getAdminHealthFaulty() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(500)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(500, health.getStatusCode());
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getAdminHealthHealthy() {
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        AdminHealth health = resolver.getAdminHealth();
+        assertNotNull(health);
+        assertEquals(200, health.getStatusCode());
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealth() {
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthFaulty() {
+        try {
+            when(connection.isReadOnly()).thenReturn(true);
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void getDatabaseHealthException() {
+        try {
+            when(connection.isReadOnly()).thenThrow(new SQLException());
+            when(connection.isClosed()).thenReturn(true);
+            when(dbLibService.isActive()).thenReturn(false);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        DatabaseHealth health = resolver.getDatabaseHealth();
+        assertEquals(Health.FAULTY, health.getHealth());
+    }
+
+    @Test
+    public void siteIdentifier() {
+        assertEquals("TestODL", resolver.getSiteIdentifier());
+        resolver.setSiteIdentifier("NewTestODL");
+        assertEquals("NewTestODL", resolver.getSiteIdentifier());
+    }
+
+    @Test
+    public void getClusterHealth() {
+        stubController();
+        ClusterHealth health = resolver.getClusterHealth();
+        assertEquals(Health.HEALTHY, health.getHealth());
+    }
+
+    private void stubController() {
+        String clusterBody = null;
+        String shardManagerBody = null;
+        String shardDefaultBody = null;
+        String shardOperationalBody = null;
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/cluster.json"))) {
+            clusterBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/shard-manager.json"))) {
+            shardManagerBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/default-config.json"))) {
+            shardDefaultBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+        try(Stream<String> stream = Files.lines(Paths.get("src/test/resources/three/default-operational.json"))) {
+            shardOperationalBody = stream.collect(Collectors.joining());
+        } catch(IOException e) {
+            fail();
+        }
+
+        if(clusterBody == null || shardManagerBody == null || shardDefaultBody == null || shardOperationalBody == null) {
+            fail();
+        }
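+        // Same Jolokia stubbing pattern as the single-node test; the shard manager
+        // and default-config shard stubs join the "testing" scenario so the
+        // fault-injection tests can swap in a failing "next" state response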
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBody(clusterBody)));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardManagerBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withStatus(200).withBody(shardDefaultBody)).willSetStateTo("next"));
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore")).willReturn(aResponse().withStatus(200).withBody(shardOperationalBody)));
+    }
+
+    @Test
+    public void getSiteHealth() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.HEALTHY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaulty() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore")).inScenario("testing").whenScenarioStateIs("next").willReturn(aResponse().withBodyFile("nonexistent")));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyShard() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore")).inScenario("testing").willReturn(aResponse().withBodyFile("nonexistent")).willSetStateTo("next"));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.HEALTHY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyCluster() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(200)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        stubFor(get(urlEqualTo("/jolokia/read/akka:type=Cluster")).willReturn(aResponse().withStatus(200).withBodyFile("nonexistent")));
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void getSiteHealthFaultyAdmin() {
+        stubController();
+        stubFor(get(urlEqualTo("/adm/healthcheck")).willReturn(aResponse().withStatus(400)));
+        try {
+            when(connection.isReadOnly()).thenReturn(false);
+            when(connection.isClosed()).thenReturn(false);
+            when(dbLibService.isActive()).thenReturn(true);
+            when(dbLibService.getConnection()).thenReturn(connection);
+        } catch(SQLException e) {
+            fail();
+        }
+        List<SiteHealth> health = resolver.getSiteHealth();
+        assertNotNull(health);
+        assertNotEquals(0, health.size());
+        assertEquals(1, health.size());
+        assertEquals(Health.FAULTY, health.get(0).getHealth());
+    }
+
+    @Test
+    public void tryFailover() {
+        FailoverStatus status = resolver.tryFailover(null);
+        assertEquals(400, status.getStatusCode());
+    }
+}
\ No newline at end of file
 
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
 
 odl-cluster-data {
   akka {
 
     cluster {
       # Remove ".tcp" when using artery.
-      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550"]
+      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.1.1:2550"]
 
       roles = [
         "member-1"
 
 # limitations under the License.
 # ============LICENSE_END=========================================================
 
+resolver=org.onap.ccsdk.sli.plugins.grtoolkit.resolver.SingleNodeHealthResolver
 akka.conf.location=src/test/resources/akka.conf
 adm.useSsl=false
-adm.fqdn=wiki.onap.org
-adm.healthcheck=
-adm.port.http=80
-adm.port.ssl=443
+adm.fqdn=localhost
+adm.healthcheck=/adm/healthcheck
+adm.port.http=9999
+adm.port.ssl=19999
 controller.credentials=admin:admin
 controller.useSsl=false
-controller.port.http=8181
-controller.port.ssl=8443
+controller.port.http=9999
+controller.port.ssl=19999
 controller.port.akka=2550
 mbean.cluster=/jolokia/read/akka:type=Cluster
 mbean.shardManager=/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore
 mbean.shard.config=/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore
-site.identifier=TestODL
+#site.identifier=TestODL
 
--- /dev/null
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+odl-cluster-data {
+  akka {
+    remote {
+      artery {
+        enabled = off
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
+      }
+      netty.tcp {
+        hostname = "127.0.0.1"
+        port = 2550
+      }
+      # when under load we might trip a false positive on the failure detector
+      # transport-failure-detector {
+        # heartbeat-interval = 4 s
+        # acceptable-heartbeat-pause = 16s
+      # }
+    }
+
+    cluster {
+      # Remove ".tcp" when using artery.
+      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.1.1:2550"]
+
+      roles = [
+        "member-1"
+      ]
+
+    }
+
+    persistence {
+      # By default the snapshots/journal directories live in KARAF_HOME. You can choose to put it somewhere else by
+      # modifying the following two properties. The directory location specified may be a relative or absolute path.
+      # The relative path is always relative to KARAF_HOME.
+
+      # snapshot-store.local.dir = "target/snapshots"
+      # journal.leveldb.dir = "target/journal"
+
+      journal {
+        leveldb {
+          # Set native = off to use a Java-only implementation of leveldb.
+          # Note that the Java-only version is not currently considered by Akka to be production quality.
+
+          # native = off
+        }
+      }
+    }
+  }
+}
 
--- /dev/null
+{
+  "request": {
+    "mbean": "akka:type=Cluster",
+    "type": "read"
+  },
+  "value": {
+    "Leader": "akka.tcp://opendaylight-cluster-data@localhost:2550",
+    "Unreachable": "",
+    "Singleton": true,
+    "Available": true,
+    "MemberStatus": "Up",
+    "ClusterStatus": "{\n  \"members\": [\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-1\"\n      ],\n      \"status\": \"Up\"\n    }\n  ],\n  \"self-address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n  \"unreachable\": []\n}\n",
+    "Members": "akka.tcp://opendaylight-cluster-data@localhost:2550"
+  },
+  "timestamp": 1575393881,
+  "status": 200
+}
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-config",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-config",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-config",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-operational",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-operational",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-operational",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+akka.conf.location=src/test/resources/single/akka.conf
+adm.useSsl=false
+adm.fqdn=localhost
+adm.healthcheck=/adm/healthcheck
+adm.port.http=9999
+adm.port.ssl=19999
+controller.credentials=admin:admin
+controller.useSsl=false
+controller.port.http=9999
+controller.port.ssl=19999
+controller.port.akka=2550
+mbean.cluster=/jolokia/read/akka:type=Cluster
+mbean.shardManager=/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore
+mbean.shard.config=/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore
+site.identifier=TestODL
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "LocalShards": [
+      "member-1-shard-default-config"
+    ],
+    "SyncStatus": true,
+    "MemberName": "member-1"
+  },
+  "timestamp": 1575393918,
+  "status": 200
+}
\ No newline at end of file
 
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
 
 odl-cluster-data {
   akka {
 
--- /dev/null
+{
+  "request": {
+    "mbean": "akka:type=Cluster",
+    "type": "read"
+  },
+  "value": {
+    "Leader": "akka.tcp://opendaylight-cluster-data@localhost:2550",
+    "Unreachable": "",
+    "Singleton": true,
+    "Available": true,
+    "MemberStatus": "Up",
+    "ClusterStatus": "{\n  \"members\": [\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-1\"\n      ],\n      \"status\": \"Up\"\n    },\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-2\"\n      ],\n      \"status\": \"Up\"\n    },\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-3\"\n      ],\n      \"status\": \"Up\"\n    }\n  ],\n  \"self-address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n  \"unreachable\": []\n}\n",
+    "Members": "akka.tcp://opendaylight-cluster-data@localhost:2550"
+  },
+  "timestamp": 1575393881,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+{
+  "output": {
+    "status": "200",
+    "served-by": "member-2",
+    "health": "HEALTHY"
+  }
+}
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-config",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-config",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-config",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-operational",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-operational",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-operational",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+akka.conf.location=src/test/resources/six/akka.conf
+adm.useSsl=false
+adm.fqdn=localhost
+adm.healthcheck=/adm/healthcheck
+adm.port.http=9999
+adm.port.ssl=19999
+controller.credentials=admin:admin
+controller.useSsl=false
+controller.port.http=9999
+controller.port.ssl=19999
+controller.port.akka=2550
+mbean.cluster=/jolokia/read/akka:type=Cluster
+mbean.shardManager=/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore
+mbean.shard.config=/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore
+site.identifier=TestODL
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "LocalShards": [
+      "member-1-shard-default-config"
+    ],
+    "SyncStatus": true,
+    "MemberName": "member-1"
+  },
+  "timestamp": 1575393918,
+  "status": 200
+}
\ No newline at end of file
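
The ShardManager fixture above pairs with the mbean.shard.config template from the test properties: each entry in LocalShards can be substituted into the template's %s placeholder to form a per-shard Jolokia path. The sketch below shows that combination with org.json only; it is an assumption for illustration, not the plugin's actual traversal (which lives in the HealthResolver classes).

import org.json.JSONArray;
import org.json.JSONObject;

public class ShardManagerFixtureSketch {
    public static void main(String[] args) {
        String body = args[0]; // contents of the ShardManager fixture, read elsewhere
        String shardTemplate =
            "/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore";
        JSONObject value = new JSONObject(body).getJSONObject("value");
        JSONArray localShards = value.getJSONArray("LocalShards");
        boolean inSync = value.getBoolean("SyncStatus");
        for (int i = 0; i < localShards.length(); i++) {
            // e.g. .../name=member-1-shard-default-config,type=DistributedConfigDatastore
            System.out.println(String.format(shardTemplate, localShards.getString(i)));
        }
        System.out.println(value.getString("MemberName") + " in sync: " + inSync);
    }
}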
 
--- /dev/null
+{
+  "output": {
+    "status": "200",
+    "id": "test-site",
+    "served-by": "member-1"
+  }
+}
 
--- /dev/null
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+odl-cluster-data {
+  akka {
+    remote {
+      artery {
+        enabled = off
+        canonical.hostname = "127.0.0.1"
+        canonical.port = 2550
+      }
+      netty.tcp {
+        hostname = "127.0.0.1"
+        port = 2550
+      }
+      # when under load we might trip a false positive on the failure detector
+      # transport-failure-detector {
+        # heartbeat-interval = 4 s
+        # acceptable-heartbeat-pause = 16s
+      # }
+    }
+
+    cluster {
+      # Remove ".tcp" when using artery.
+      seed-nodes = ["akka.tcp://opendaylight-cluster-data@127.0.0.1:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.2:2550", "akka.tcp://opendaylight-cluster-data@127.0.0.3:2550"]
+
+      roles = [
+        "member-1"
+      ]
+
+    }
+
+    persistence {
+      # By default the snapshots/journal directories live in KARAF_HOME. You can choose to put it somewhere else by
+      # modifying the following two properties. The directory location specified may be a relative or absolute path.
+      # The relative path is always relative to KARAF_HOME.
+
+      # snapshot-store.local.dir = "target/snapshots"
+      # journal.leveldb.dir = "target/journal"
+
+      journal {
+        leveldb {
+          # Set native = off to use a Java-only implementation of leveldb.
+          # Note that the Java-only version is not currently considered by Akka to be production quality.
+
+          # native = off
+        }
+      }
+    }
+  }
+}
 
--- /dev/null
+{
+  "request": {
+    "mbean": "akka:type=Cluster",
+    "type": "read"
+  },
+  "value": {
+    "Leader": "akka.tcp://opendaylight-cluster-data@localhost:2550",
+    "Unreachable": "",
+    "Singleton": true,
+    "Available": true,
+    "MemberStatus": "Up",
+    "ClusterStatus": "{\n  \"members\": [\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-1\"\n      ],\n      \"status\": \"Up\"\n    },\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-2\"\n      ],\n      \"status\": \"Up\"\n    },\n    {\n      \"address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n      \"roles\": [\n        \"dc-default\",\n        \"member-3\"\n      ],\n      \"status\": \"Up\"\n    }\n  ],\n  \"self-address\": \"akka.tcp://opendaylight-cluster-data@localhost:2550\",\n  \"unreachable\": []\n}\n",
+    "Members": "akka.tcp://opendaylight-cluster-data@localhost:2550"
+  },
+  "timestamp": 1575393881,
+  "status": 200
+}
\ No newline at end of file
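
One detail of the akka Cluster fixture above is easy to miss: the ClusterStatus attribute is itself a JSON document serialized as a string, so it needs a second parse. The following hedged sketch reads the fixture with org.json to illustrate that; the class and its use of args are hypothetical and do not reflect the plugin's real parsing code.

import org.json.JSONArray;
import org.json.JSONObject;

public class ClusterFixtureSketch {
    public static void main(String[] args) {
        String body = args[0]; // contents of the Cluster fixture, read elsewhere
        JSONObject value = new JSONObject(body).getJSONObject("value");
        String leader = value.getString("Leader");
        // Second parse: ClusterStatus is a JSON string embedded in the response
        JSONObject clusterStatus = new JSONObject(value.getString("ClusterStatus"));
        JSONArray members = clusterStatus.getJSONArray("members");
        for (int i = 0; i < members.length(); i++) {
            JSONObject member = members.getJSONObject(i);
            System.out.println(member.getString("address") + " -> " + member.getString("status"));
        }
        System.out.println("Leader: " + leader);
    }
}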
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-config",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-config",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-config",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
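
As a reading aid for the per-shard fixtures, the sketch below pulls out the Raft attributes most relevant to shard health (ShardName, RaftState, Leader, Voting, CommitIndex) using org.json. Field names come straight from the fixture; how the resolvers actually weigh them is not reproduced here, and the class itself is illustrative.

import org.json.JSONObject;

public class ShardFixtureSketch {
    public static void main(String[] args) {
        String body = args[0]; // contents of a shard fixture, read elsewhere
        JSONObject value = new JSONObject(body).getJSONObject("value");
        String shardName = value.getString("ShardName");
        String raftState = value.getString("RaftState"); // "Leader" in this fixture
        String leader = value.getString("Leader");
        boolean voting = value.getBoolean("Voting");
        long commitIndex = value.getLong("CommitIndex");
        System.out.println(shardName + ": state=" + raftState + ", leader=" + leader
                + ", voting=" + voting + ", commitIndex=" + commitIndex);
    }
}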
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=Shards,name=member-1-shard-default-operational,type=DistributedOperationalDatastore",
+    "type": "read"
+  },
+  "value": {
+    "ReadWriteTransactionCount": 0,
+    "SnapshotIndex": 22,
+    "InMemoryJournalLogSize": 1,
+    "ReplicatedToAllIndex": -1,
+    "Leader": "member-1-shard-default-operational",
+    "LastIndex": 23,
+    "RaftState": "Leader",
+    "LastCommittedTransactionTime": "2019-12-03 16:36:39.413",
+    "LastApplied": 23,
+    "PeerAddresses": "",
+    "LastLogIndex": 23,
+    "LastLeadershipChangeTime": "2019-12-03 16:36:33.460",
+    "WriteOnlyTransactionCount": 0,
+    "FollowerInitialSyncStatus": false,
+    "FollowerInfo": [],
+    "FailedReadTransactionsCount": 0,
+    "Voting": true,
+    "StatRetrievalTime": "454.8 μs",
+    "CurrentTerm": 4,
+    "LastTerm": 4,
+    "FailedTransactionsCount": 0,
+    "PendingTxCommitQueueSize": 0,
+    "VotedFor": "member-1-shard-default-operational",
+    "SnapshotCaptureInitiated": false,
+    "CommittedTransactionsCount": 5,
+    "TxCohortCacheSize": 0,
+    "PeerVotingStates": "",
+    "LastLogTerm": 4,
+    "StatRetrievalError": null,
+    "CommitIndex": 23,
+    "SnapshotTerm": 4,
+    "AbortTransactionsCount": 0,
+    "ReadOnlyTransactionCount": 0,
+    "ShardName": "member-1-shard-default-operational",
+    "LeadershipChangeCount": 1,
+    "InMemoryJournalDataSize": 37
+  },
+  "timestamp": 1575393787,
+  "status": 200
+}
\ No newline at end of file
 
--- /dev/null
+# ============LICENSE_START=======================================================
+# openECOMP : SDN-C
+# ================================================================================
+# Copyright (C) 2019 AT&T Intellectual Property. All rights
+#                      reserved.
+# ================================================================================
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============LICENSE_END=========================================================
+
+akka.conf.location=src/test/resources/single/akka.conf
+adm.useSsl=false
+adm.fqdn=localhost
+adm.healthcheck=/adm/healthcheck
+adm.port.http=9999
+adm.port.ssl=19999
+controller.credentials=admin:admin
+controller.useSsl=false
+controller.port.http=9999
+controller.port.ssl=19999
+controller.port.akka=2550
+mbean.cluster=/jolokia/read/akka:type=Cluster
+mbean.shardManager=/jolokia/read/org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore
+mbean.shard.config=/jolokia/read/org.opendaylight.controller:Category=Shards,name=%s,type=DistributedConfigDatastore
+site.identifier=TestODL
 
--- /dev/null
+{
+  "request": {
+    "mbean": "org.opendaylight.controller:Category=ShardManager,name=shard-manager-config,type=DistributedConfigDatastore",
+    "type": "read"
+  },
+  "value": {
+    "LocalShards": [
+      "member-1-shard-default-config"
+    ],
+    "SyncStatus": true,
+    "MemberName": "member-1"
+  },
+  "timestamp": 1575393918,
+  "status": 200
+}
\ No newline at end of file