# from .consumer.CustomKafkaConsumer import CustomKafkaConsumer
# from .producer.CustomKafkaProducer import CustomKafkaProducer
import concurrent.futures
import json
import logging

from consumer import CustomKafkaConsumer
from producer import CustomKafkaProducer
# Configure the root logger once for the whole script:
# "<timestamp>::<pid>::<level>::<message>", e.g. "01-Jan-24 12:00:00::1234::INFO::..."
logging.basicConfig(format='%(asctime)s::%(process)d::%(levelname)s::%(message)s', level=logging.INFO, datefmt='%d-%b-%y %H:%M:%S')
#Begin: Sample producer based on file
# Feed the topic from a local dump: one JSON-encoded metric sample per line.
customKafkaProducer = CustomKafkaProducer.CustomKafkaProducer()
with open("./multithreading-metrics.json") as metrics_file:
    for raw_line in metrics_file:
        # The metric name (labels.__name__) is used as the record key.
        metric_name = json.loads(raw_line)["labels"]["__name__"]
        customKafkaProducer.produce(raw_line, metric_name)
#END: Sample producer based on file
customKafkaConsumer = CustomKafkaConsumer.CustomKafkaConsumer()

#Form a data structure for query formation
# Fix: queries was appended to without being initialized anywhere visible,
# which raises NameError on first use.
queries = []
queries.append({"metric_name" : "go_gc_duration_seconds_count", "ip": "10.42.1.93:8686"})
queries.append({"metric_name" : 'go_gc_duration_seconds_count', "ip": "10.42.1.92:8686"})

# Run the (presumably blocking) consume loop on a single background thread.
# NOTE(review): the returned Future is discarded, so any exception raised in
# consume() is silently swallowed — consider keeping the Future and checking it.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
executor.submit(customKafkaConsumer.consume)
# Replay every configured query against the consumer and report what came back.
for query in queries:
    records = customKafkaConsumer.executeQuery(query["metric_name"], query["ip"])
    logging.info("The records collected :: {}".format(records))
    logging.info("The length of records collected: {}".format(len(records)))
    print("The records :: {}".format(records))
46 if __name__ == '__main__':