Removing unused imports in Python scripts
[demo.git] / vnfs / DAaaS / microservices / PythonApps / python-kafkaConsumer-inference-app / src / main.py
#!/usr/bin/env python3

import concurrent.futures
import json
import logging

from consumer import CustomKafkaConsumer
from producer import CustomKafkaProducer

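# Log as "timestamp::pid::level::message", INFO level and above.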
logging.basicConfig(format='%(asctime)s::%(process)d::%(levelname)s::%(message)s',
                    level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')

def main():
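    """Replay sample metrics through the Kafka producer, then poll the consumer for query results."""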
    # Begin: sample producer that replays metrics from a local JSON-lines file
    customKafkaProducer = CustomKafkaProducer.CustomKafkaProducer()
    with open("./multithreading-metrics.json") as input_file:
        for each_line in input_file:
            python_obj = json.loads(each_line)
            # Send the raw line along with its metric name from the record labels.
            customKafkaProducer.produce(each_line, python_obj["labels"]["__name__"])
    # End: sample producer

    customKafkaConsumer = CustomKafkaConsumer.CustomKafkaConsumer()

    # Queries to run against the consumer: a metric name and the source instance.
    queries = [
        {"metric_name": "go_gc_duration_seconds_count", "ip": "10.42.1.93:8686"},
        {"metric_name": "go_gc_duration_seconds_count", "ip": "10.42.1.92:8686"},
    ]

    # Run the Kafka consumer loop on a single background thread.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    executor.submit(customKafkaConsumer.consume)

    # Poll forever, logging the records the consumer has collected for each query.
    while True:
        for each_record in queries:
            list_of_records = customKafkaConsumer.executeQuery(each_record["metric_name"], each_record["ip"])
            logging.info("The records collected :: {}".format(list_of_records))
            logging.info("The number of records collected :: {}".format(len(list_of_records)))
            print("The records :: {}".format(list_of_records))


if __name__ == '__main__':
    main()