diff --git a/fastdeploy/_loop.py b/fastdeploy/_loop.py
index 688ac05..44b3a8a 100644
--- a/fastdeploy/_loop.py
+++ b/fastdeploy/_loop.py
@@ -186,6 +186,8 @@ def start_loop(
     """
     )
 
+    prediction_loop_started_at = time.time()
+
     while True:
         """
         Set timedout_in_queue to True for all the predictions that have been in the queue for more than timeout_time seconds
@@ -226,9 +228,12 @@ def start_loop(
 
         _utils.logger.debug(f"Processing batch {unique_id_wise_input_count}")
 
+        process_batch_started_at = time.time()
         results, last_predictor_success, received_at, predicted_at = process_batch(
             predictor, input_batch, optimal_batch_size
         )
+        process_batch_ended_at = time.time()
+
         unique_id_wise_results = prepare_results(
             unique_id_wise_input_count,
             results,
@@ -244,6 +249,18 @@ def start_loop(
             f"Updated results predictor {predictor_sequence}: {list(unique_id_wise_results)}"
         )
 
+        _utils.GLOBAL_METRICS_INDEX.math(
+            "total_predictor_run_for_hours",
+            (process_batch_ended_at - process_batch_started_at) / 3600,
+            "+=",
+        )
+
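+        # Total wall-clock hours since the prediction loop started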
+        _utils.GLOBAL_METRICS_INDEX["total_predictor_up_for_hours"] = (
+            time.time() - prediction_loop_started_at
+        ) / 3600
+
 
 if __name__ == "__main__":
     import sys
diff --git a/fastdeploy/_rest.py b/fastdeploy/_rest.py
index ab9c9b8..68a02d9 100644
--- a/fastdeploy/_rest.py
+++ b/fastdeploy/_rest.py
@@ -251,17 +251,13 @@ def on_get(self, req, resp):
                 ) / requests_received_in_last_x_seconds_that_are_successful
 
         prometheus_text = f"""
-# HELP pending_requests The number of pending requests.
-# TYPE pending_requests gauge
-pending_requests {_utils.MAIN_INDEX.count(query={"-1.predicted_at": 0, "last_predictor_success": True})}
+# HELP total_predictor_run_for_hours Total hours the predictor has been actively running predictions since start.
+# TYPE total_predictor_run_for_hours counter
+total_predictor_run_for_hours {_utils.GLOBAL_METRICS_INDEX["total_predictor_run_for_hours"]}
 
-# HELP failed_requests The number of failed requests.
-# TYPE failed_requests gauge
-failed_requests {_utils.MAIN_INDEX.count(query={"last_predictor_success": False})}
-
-# HELP successful_requests The number of failed requests.
-# TYPE successful_requests gauge
-successful_requests {_utils.MAIN_INDEX.count(query={"-1.predicted_at": {"$ne": 0}, "last_predictor_success": True})}
+# HELP total_predictor_up_for_hours Total hours the predictor has been up since start.
+# TYPE total_predictor_up_for_hours counter
+total_predictor_up_for_hours {_utils.GLOBAL_METRICS_INDEX["total_predictor_up_for_hours"]}
 
 # HELP requests_received_in_last_x_seconds The number of requests received in last {_LAST_X_SECONDS} seconds.
 # TYPE requests_received_in_last_x_seconds gauge
@@ -394,18 +390,6 @@ def on_get(self, req, resp):
             }
 
 
-class Failed(object):
-    def on_get(self, req, resp):
-        last_n_failed = int(req.params.get("last_n_failed", 10))
-        failed_inputs = _utils.MAIN_INDEX.search(
-            query={"last_predictor_success": False},
-            n=last_n_failed,
-            select_keys=["-1.inputs"],
-        )
-
-        resp.media = failed_inputs
-
-
 app = falcon.App(
     middleware=falcon.CORSMiddleware(allow_origins="*", allow_credentials="*"),
 )
diff --git a/fastdeploy/_utils.py b/fastdeploy/_utils.py
index 1277b9c..ee82be6 100644
--- a/fastdeploy/_utils.py
+++ b/fastdeploy/_utils.py
@@ -114,6 +114,12 @@
 MAIN_INDEX.optimize_for_query(["timedout_in_queue"])
 
 
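+# Key-value index for predictor runtime metrics; written by the prediction loop, read by the metrics endpoint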
+GLOBAL_METRICS_INDEX = KVIndex(os.path.join("fastdeploy_dbs", "global_metrics_index.db"))
+GLOBAL_METRICS_INDEX["total_predictor_run_for_hours"] = 0
+GLOBAL_METRICS_INDEX["total_predictor_up_for_hours"] = 0
+
+
 def warmup(predictor, example_input, n=3):
     """
     Run warmup prediction on the model.
diff --git a/setup.py b/setup.py
index 93f2f8a..5e578c7 100644
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
 EMAIL = "praneeth@bpraneeth.com"
 AUTHOR = "BEDAPUDI PRANEETH"
 REQUIRES_PYTHON = ">=3.6.0"
-VERSION = "3.0.22"
+VERSION = "3.0.23"
 
 # What packages are required for this module to be executed?
 REQUIRED = ["falcon", "liteindex==0.0.3.2.dev4", "zstandard", "gunicorn[gevent]", "msgpack"]