Update observability.py
observability.py  CHANGED  (+202, -141)
@@ -121,151 +121,212 @@ class LLMObservabilityManager:
         Returns:
             Dict containing dashboard statistics and time series data
         """
-        ...
-        with sqlite3.connect(self.db_path) as conn:
-            cursor = conn.cursor()
-
-            # Build time filter
-            time_filter = ""
-            if days is not None:
-                time_filter = f"WHERE created_at >= datetime('now', '-{days} days')"
-
-            # Get general statistics
-            cursor.execute(f"""
-                SELECT
-                    COUNT(*) as total_requests,
-                    COUNT(DISTINCT conversation_id) as unique_conversations,
-                    COUNT(DISTINCT user) as unique_users,
-                    SUM(total_tokens) as total_tokens,
-                    SUM(cost) as total_cost,
-                    AVG(latency) as avg_latency,
-                    SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count
-                FROM llm_observations
-                {time_filter}
-            """)
-            general_stats = dict(zip([col[0] for col in cursor.description], cursor.fetchone()))
-
-            # Get model distribution
-            cursor.execute(f"""
-                SELECT model, COUNT(*) as count
-                FROM llm_observations
-                {time_filter}
-                GROUP BY model
-                ORDER BY count DESC
-            """)
-            model_distribution = {row[0]: row[1] for row in cursor.fetchall()}
-
-            # Get average tokens per request
-            cursor.execute(f"""
-                SELECT
-                    AVG(prompt_tokens) as avg_prompt_tokens,
-                    AVG(completion_tokens) as avg_completion_tokens
-                FROM llm_observations
-                {time_filter}
-            """)
-            token_averages = dict(zip([col[0] for col in cursor.description], cursor.fetchone()))
-
-            # Get top users by request count
-            cursor.execute(f"""
-                SELECT user, COUNT(*) as request_count,
-                       SUM(total_tokens) as total_tokens,
-                       SUM(cost) as total_cost
-                FROM llm_observations
-                {time_filter}
-                GROUP BY user
-                ORDER BY request_count DESC
-                LIMIT 5
-            """)
-            top_users = [
-                {
-                    "user": row[0],
-                    "request_count": row[1],
-                    "total_tokens": row[2],
-                    "total_cost": round(row[3], 2)
-                }
-                for row in cursor.fetchall()
-            ]
-
-            # Get time series data
-            time_series_format = {
-                'hour': "%Y-%m-%d %H:00:00",
-                'day': "%Y-%m-%d",
-                'week': "%Y-%W",
-                'month': "%Y-%m"
-            }
-
-            format_string = time_series_format[time_series_interval]
-
-            cursor.execute(f"""
-                SELECT
-                    strftime('{format_string}', created_at) as time_bucket,
-                    COUNT(*) as request_count,
-                    SUM(total_tokens) as total_tokens,
-                    SUM(cost) as total_cost,
-                    AVG(latency) as avg_latency,
-                    COUNT(DISTINCT user) as unique_users,
-                    SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count
-                FROM llm_observations
-                {time_filter}
-                GROUP BY time_bucket
-                ORDER BY time_bucket
-            """)
-
-            time_series = [
-                {
-                    "timestamp": row[0],
-                    "request_count": row[1],
-                    "total_tokens": row[2],
-                    "total_cost": round(row[3], 2),
-                    "avg_latency": round(row[4], 2),
-                    "unique_users": row[5],
-                    "error_count": row[6]
-                }
-                for row in cursor.fetchall()
-            ]
-
-            # Calculate trends
-            if len(time_series) >= 2:
-                current = time_series[-1]
-                previous = time_series[-2]
-                trends = {
-                    "request_trend": ...,
-                    "cost_trend": ...,
-                    "token_trend": ...
-                }
-            else:
-                trends = {
-                    "request_trend": 0,
-                    "cost_trend": 0,
-                    "token_trend": 0
-                }
-
-        return {
-            "general_stats": {
-                "total_requests": general_stats["total_requests"],
-                "unique_conversations": general_stats["unique_conversations"],
-                "unique_users": general_stats["unique_users"],
-                "total_tokens": general_stats["total_tokens"],
-                "total_cost": round(general_stats["total_cost"], 2),
-                "avg_latency": round(general_stats["avg_latency"], 2),
-                "error_rate": round(general_stats["error_count"] / general_stats["total_requests"] * 100, 2)
-            },
-            "model_distribution": model_distribution,
-            "token_metrics": {
-                "avg_prompt_tokens": round(token_averages["avg_prompt_tokens"], 2),
-                "avg_completion_tokens": round(token_averages["avg_completion_tokens"], 2)
-            },
-            "top_users": top_users,
-            "time_series": time_series,
-            "trends": trends
-        }
-
+        def safe_round(value: Any, decimals: int = 2) -> float:
+            """Safely round a value, returning 0 if the value is None or invalid."""
+            try:
+                return round(float(value), decimals) if value is not None else 0.0
+            except (TypeError, ValueError):
+                return 0.0
+
+        def safe_divide(numerator: Any, denominator: Any, decimals: int = 2) -> float:
+            """Safely divide two numbers, handling None and zero division."""
+            try:
+                if not denominator:  # covers both None and 0
+                    return 0.0
+                return round(float(numerator or 0) / float(denominator), decimals)
+            except (TypeError, ValueError):
+                return 0.0
+
+        try:
+            with sqlite3.connect(self.db_path) as conn:
+                cursor = conn.cursor()
+
+                # Build time filter
+                time_filter = ""
+                if days is not None:
+                    time_filter = f"WHERE created_at >= datetime('now', '-{days} days')"
+
+                # Get general statistics
+                cursor.execute(f"""
+                    SELECT
+                        COUNT(*) as total_requests,
+                        COUNT(DISTINCT conversation_id) as unique_conversations,
+                        COUNT(DISTINCT user) as unique_users,
+                        SUM(total_tokens) as total_tokens,
+                        SUM(cost) as total_cost,
+                        AVG(latency) as avg_latency,
+                        SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count
+                    FROM llm_observations
+                    {time_filter}
+                """)
+                row = cursor.fetchone()
+                if not row:
+                    return self._get_empty_statistics()
+
+                general_stats = dict(zip([col[0] for col in cursor.description], row))
+
+                # Get model distribution
+                cursor.execute(f"""
+                    SELECT model, COUNT(*) as count
+                    FROM llm_observations
+                    {time_filter}
+                    GROUP BY model
+                    ORDER BY count DESC
+                """)
+                # fetchall() can only be consumed once and already returns []
+                # when there are no rows, so a single call is both correct and safe
+                model_distribution = {row[0]: row[1] for row in cursor.fetchall()}
+
+                # Get average tokens per request
+                cursor.execute(f"""
+                    SELECT
+                        AVG(prompt_tokens) as avg_prompt_tokens,
+                        AVG(completion_tokens) as avg_completion_tokens
+                    FROM llm_observations
+                    {time_filter}
+                """)
+                token_averages = dict(zip([col[0] for col in cursor.description], cursor.fetchone()))
+
+                # Get top users by request count
+                cursor.execute(f"""
+                    SELECT user, COUNT(*) as request_count,
+                           SUM(total_tokens) as total_tokens,
+                           SUM(cost) as total_cost
+                    FROM llm_observations
+                    {time_filter}
+                    GROUP BY user
+                    ORDER BY request_count DESC
+                    LIMIT 5
+                """)
+                top_users = [
+                    {
+                        "user": row[0],
+                        "request_count": row[1],
+                        "total_tokens": row[2] or 0,
+                        "total_cost": safe_round(row[3])
+                    }
+                    for row in cursor.fetchall()
+                ]
+
+                # Get time series data
+                time_series_format = {
+                    'hour': "%Y-%m-%d %H:00:00",
+                    'day': "%Y-%m-%d",
+                    'week': "%Y-%W",
+                    'month': "%Y-%m"
+                }
+
+                format_string = time_series_format.get(time_series_interval, "%Y-%m-%d")
+
+                cursor.execute(f"""
+                    SELECT
+                        strftime('{format_string}', created_at) as time_bucket,
+                        COUNT(*) as request_count,
+                        SUM(total_tokens) as total_tokens,
+                        SUM(cost) as total_cost,
+                        AVG(latency) as avg_latency,
+                        COUNT(DISTINCT user) as unique_users,
+                        SUM(CASE WHEN status = 'error' THEN 1 ELSE 0 END) as error_count
+                    FROM llm_observations
+                    {time_filter}
+                    GROUP BY time_bucket
+                    ORDER BY time_bucket
+                """)
+
+                time_series = [
+                    {
+                        "timestamp": row[0],
+                        "request_count": row[1] or 0,
+                        "total_tokens": row[2] or 0,
+                        "total_cost": safe_round(row[3]),
+                        "avg_latency": safe_round(row[4]),
+                        "unique_users": row[5] or 0,
+                        "error_count": row[6] or 0
+                    }
+                    for row in cursor.fetchall()
+                ]
+
+                # Calculate trends safely
+                trends = self._calculate_trends(time_series)
+
+                return {
+                    "general_stats": {
+                        "total_requests": general_stats["total_requests"] or 0,
+                        "unique_conversations": general_stats["unique_conversations"] or 0,
+                        "unique_users": general_stats["unique_users"] or 0,
+                        "total_tokens": general_stats["total_tokens"] or 0,
+                        "total_cost": safe_round(general_stats["total_cost"]),
+                        "avg_latency": safe_round(general_stats["avg_latency"]),
+                        "error_rate": safe_round(
+                            safe_divide(general_stats["error_count"], general_stats["total_requests"]) * 100
+                        )
+                    },
+                    "model_distribution": model_distribution,
+                    "token_metrics": {
+                        "avg_prompt_tokens": safe_round(token_averages["avg_prompt_tokens"]),
+                        "avg_completion_tokens": safe_round(token_averages["avg_completion_tokens"])
+                    },
+                    "top_users": top_users,
+                    "time_series": time_series,
+                    "trends": trends
+                }
+        except sqlite3.Error as e:
+            logger.error(f"Database error in get_dashboard_statistics: {e}")
+            return self._get_empty_statistics()
+        except Exception as e:
+            logger.error(f"Error in get_dashboard_statistics: {e}")
+            return self._get_empty_statistics()
+
+    def _get_empty_statistics(self) -> Dict[str, Any]:
+        """Return an empty statistics structure when no data is available."""
+        return {
+            "general_stats": {
+                "total_requests": 0,
+                "unique_conversations": 0,
+                "unique_users": 0,
+                "total_tokens": 0,
+                "total_cost": 0.0,
+                "avg_latency": 0.0,
+                "error_rate": 0.0
+            },
+            "model_distribution": {},
+            "token_metrics": {
+                "avg_prompt_tokens": 0.0,
+                "avg_completion_tokens": 0.0
+            },
+            "top_users": [],
+            "time_series": [],
+            "trends": {
+                "request_trend": 0.0,
+                "cost_trend": 0.0,
+                "token_trend": 0.0
+            }
+        }
+
+    def _calculate_trends(self, time_series: List[Dict[str, Any]]) -> Dict[str, float]:
+        """Calculate trends safely from time series data."""
+        if len(time_series) >= 2:
+            current = time_series[-1]
+            previous = time_series[-2]
+            return {
+                "request_trend": self._calculate_percentage_change(
+                    previous["request_count"], current["request_count"]),
+                "cost_trend": self._calculate_percentage_change(
+                    previous["total_cost"], current["total_cost"]),
+                "token_trend": self._calculate_percentage_change(
+                    previous["total_tokens"], current["total_tokens"])
+            }
+        return {
+            "request_trend": 0.0,
+            "cost_trend": 0.0,
+            "token_trend": 0.0
+        }
+
+    def _calculate_percentage_change(self, old_value: Any, new_value: Any) -> float:
+        """Calculate percentage change between two values safely."""
+        try:
+            old_value = float(old_value or 0)
+            new_value = float(new_value or 0)
+            if old_value == 0:
+                return 100.0 if new_value > 0 else 0.0
+            return round(((new_value - old_value) / old_value) * 100, 2)
+        except (TypeError, ValueError):
+            return 0.0
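For context, a minimal usage sketch of the refactored method. The constructor signature and import path are not part of this diff, so the module name, the db_path keyword, and the days/time_series_interval parameters below are assumptions inferred from the method body:

    from observability import LLMObservabilityManager  # assumed import path

    manager = LLMObservabilityManager(db_path="observability.db")  # assumed constructor

    # Last 7 days, bucketed per day. An unrecognized interval now falls back
    # to the daily "%Y-%m-%d" format via .get() instead of raising KeyError.
    stats = manager.get_dashboard_statistics(days=7, time_series_interval="day")

    # An empty table or an sqlite3.Error now yields the zeroed structure from
    # _get_empty_statistics() instead of an unhandled exception, and the nested
    # helpers normalize NULL aggregates: safe_round(None) -> 0.0, safe_divide(5, 0) -> 0.0
    print(stats["general_stats"]["error_rate"])
    print(stats["trends"]["request_trend"])

Note that _calculate_percentage_change reports a zero baseline as a 100.0 percent increase whenever the new value is positive, so the first non-empty bucket after an empty one will show a full-scale jump in the trend fields.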