Update app.py
Browse files
app.py
CHANGED
@@ -14,31 +14,47 @@ import os
|
|
14 |
|
15 |
# --- Ensure chat_log.txt exists ---
|
16 |
log_file = "chat_log.txt"
|
17 |
-
|
18 |
-
|
19 |
-
|
|
|
|
|
|
|
|
|
20 |
|
21 |
# --- Logging Setup ---
|
22 |
-
|
23 |
-
logging.
|
|
|
|
|
|
|
24 |
|
25 |
# Load the model
|
26 |
-
|
27 |
-
|
|
|
|
|
|
|
28 |
|
29 |
# Load the dataset
|
30 |
-
|
31 |
-
|
|
|
|
|
|
|
32 |
|
33 |
# --- Prometheus Metrics Setup ---
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
|
|
|
|
|
|
42 |
|
43 |
# --- Queue and Metrics ---
|
44 |
chat_queue = Queue() # Define chat_queue globally
|
|
|
14 |
|
15 |
# --- Ensure chat_log.txt exists ---
# Path of the shared log file; the logging setup later in this module writes here too.
log_file = "chat_log.txt"
try:
    # EAFP: mode "x" creates the file only if it does not already exist, in one
    # atomic step — avoids the exists()-then-open TOCTOU race of an
    # os.path.exists() check followed by open(..., "w").
    try:
        with open(log_file, "x") as f:
            f.write("Log file created.\n")  # seed content for a brand-new file
    except FileExistsError:
        pass  # file already present — keep its contents, nothing to do
    print(f"{log_file} is ready for logging.")
except Exception as e:
    # Best-effort: startup must not crash just because the log file is unavailable.
    print(f"Error creating log file: {e}")
|
24 |
|
25 |
# --- Logging Setup ---
# Route all module logging to the shared log file at DEBUG verbosity.
try:
    _LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
    logging.basicConfig(format=_LOG_FORMAT, filename=log_file, level=logging.DEBUG)
    logging.debug("Logging setup complete.")
except Exception as e:
    # Fall back to stdout if the logging subsystem cannot be configured.
    print(f"Error setting up logging: {e}")
|
31 |
|
32 |
# Load the model
# Hugging Face token-classification pipeline used for NER. On failure the name
# is bound to None (instead of being left undefined) so downstream code fails
# with a clear check/TypeError rather than a distant NameError.
try:
    ner_pipeline = pipeline("ner", model="Sevixdd/roberta-base-finetuned-ner")
    logging.debug("NER pipeline loaded.")
except Exception as e:
    ner_pipeline = None  # keep the name defined even when loading fails
    logging.error("Error loading NER pipeline: %s", e)  # lazy %-args, same message
|
38 |
|
39 |
# Load the dataset
# PLOD-filtered corpus. As with the model above, bind None on failure so the
# module always defines `dataset` and callers can test for it explicitly.
try:
    dataset = load_dataset("surrey-nlp/PLOD-filtered")
    logging.debug("Dataset loaded.")
except Exception as e:
    dataset = None  # keep the name bound so later checks are explicit
    logging.error("Error loading dataset: %s", e)  # lazy %-args, same message
|
45 |
|
46 |
# --- Prometheus Metrics Setup ---
# Register the app's metrics once at import time. Registration order and all
# metric names/help strings are load-bearing for the exporter, so they are
# kept exactly as-is.
try:
    # Per-request counters and histograms.
    REQUEST_COUNT = Counter('gradio_request_count', 'Total number of requests')
    REQUEST_LATENCY = Histogram(
        'gradio_request_latency_seconds',
        'Request latency in seconds',
    )
    ERROR_COUNT = Counter('gradio_error_count', 'Total number of errors')
    RESPONSE_SIZE = Histogram(
        'gradio_response_size_bytes',
        'Size of responses in bytes',
    )
    # Host/process gauges, presumably sampled by a background task — confirm.
    CPU_USAGE = Gauge('system_cpu_usage_percent', 'System CPU usage in percent')
    MEM_USAGE = Gauge(
        'system_memory_usage_percent',
        'System memory usage in percent',
    )
    QUEUE_LENGTH = Gauge('chat_queue_length', 'Length of the chat queue')
    logging.debug("Prometheus metrics setup complete.")
except Exception as e:
    logging.error(f"Error setting up Prometheus metrics: {e}")
|
58 |
|
59 |
# --- Queue and Metrics ---
# FIFO of pending chat work shared across the module; presumably queue.Queue
# (the import is outside this view) — TODO confirm which Queue is imported.
chat_queue = Queue() # Define chat_queue globally
|