-
Notifications
You must be signed in to change notification settings - Fork 608
Expand file tree
/
Copy pathlitellm.py
More file actions
328 lines (281 loc) · 11.6 KB
/
litellm.py
File metadata and controls
328 lines (281 loc) · 11.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
import copy
from typing import TYPE_CHECKING
import sentry_sdk
from sentry_sdk import consts
from sentry_sdk.ai.monitoring import record_token_usage
from sentry_sdk.ai.utils import (
get_start_span_function,
set_data_normalized,
truncate_and_annotate_messages,
transform_openai_content_part,
truncate_and_annotate_embedding_inputs,
)
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
from sentry_sdk.scope import should_send_default_pii
from sentry_sdk.utils import event_from_exception
if TYPE_CHECKING:
from typing import Any, Dict, List
from datetime import datetime
try:
import litellm # type: ignore[import-not-found]
from litellm import input_callback, success_callback, failure_callback
except ImportError:
raise DidNotEnable("LiteLLM not installed")
def _get_metadata_dict(kwargs: "Dict[str, Any]") -> "Dict[str, Any]":
"""Get the metadata dictionary from the kwargs."""
litellm_params = kwargs.setdefault("litellm_params", {})
# we need this weird little dance, as metadata might be set but may be None initially
metadata = litellm_params.get("metadata")
if metadata is None:
metadata = {}
litellm_params["metadata"] = metadata
return metadata
def _convert_message_parts(messages: "List[Dict[str, Any]]") -> "List[Dict[str, Any]]":
"""
Convert the message parts from OpenAI format to the `gen_ai.request.messages` format
using the OpenAI-specific transformer (LiteLLM uses OpenAI's message format).
Deep copies messages to avoid mutating original kwargs.
"""
# Deep copy to avoid mutating original messages from kwargs
messages = copy.deepcopy(messages)
for message in messages:
if not isinstance(message, dict):
continue
content = message.get("content")
if isinstance(content, (list, tuple)):
transformed = []
for item in content:
if isinstance(item, dict):
result = transform_openai_content_part(item)
# If transformation succeeded, use the result; otherwise keep original
transformed.append(result if result is not None else item)
else:
transformed.append(item)
message["content"] = transformed
return messages
def _input_callback(kwargs: "Dict[str, Any]") -> None:
    """Start a Sentry span/transaction for an outgoing LiteLLM request.

    Records provider/model/operation attributes and, when PII is allowed,
    the request messages or embedding inputs. The span is stashed in
    LiteLLM's metadata so the success/failure callbacks can close it later.
    """
    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
    if integration is None:
        return

    requested_model = kwargs.get("model", "")
    try:
        model_name, provider_name, _, _ = litellm.get_llm_provider(requested_model)
    except Exception:
        # Provider detection failed; fall back to the raw model string.
        model_name = requested_model
        provider_name = "unknown"

    operation = "embeddings" if kwargs.get("call_type") == "embedding" else "chat"

    span = get_start_span_function()(
        op=(
            consts.OP.GEN_AI_CHAT
            if operation == "chat"
            else consts.OP.GEN_AI_EMBEDDINGS
        ),
        name=f"{operation} {model_name}",
        origin=LiteLLMIntegration.origin,
    )
    span.__enter__()
    # Stash the live span in litellm's metadata; the success/failure
    # callbacks retrieve it from there to finish it.
    _get_metadata_dict(kwargs)["_sentry_span"] = span

    set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, provider_name)
    set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation)

    # Only attach prompt content when PII is both globally and per-integration allowed.
    if should_send_default_pii() and integration.include_prompts:
        if operation == "embeddings":
            raw_input = kwargs.get("input")
            if raw_input:
                scope = sentry_sdk.get_current_scope()
                # A single string input is wrapped into a one-element list.
                inputs = raw_input if isinstance(raw_input, list) else [raw_input]
                annotated = truncate_and_annotate_embedding_inputs(inputs, span, scope)
                if annotated is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_EMBEDDINGS_INPUT,
                        annotated,
                        unpack=False,
                    )
        else:
            chat_messages = kwargs.get("messages", [])
            if chat_messages:
                scope = sentry_sdk.get_current_scope()
                annotated = truncate_and_annotate_messages(
                    _convert_message_parts(chat_messages), span, scope
                )
                if annotated is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        annotated,
                        unpack=False,
                    )

    # Common generation parameters -> span attributes (unset ones are skipped).
    for param, attribute in (
        ("model", SPANDATA.GEN_AI_REQUEST_MODEL),
        ("stream", SPANDATA.GEN_AI_RESPONSE_STREAMING),
        ("max_tokens", SPANDATA.GEN_AI_REQUEST_MAX_TOKENS),
        ("presence_penalty", SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY),
        ("frequency_penalty", SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY),
        ("temperature", SPANDATA.GEN_AI_REQUEST_TEMPERATURE),
        ("top_p", SPANDATA.GEN_AI_REQUEST_TOP_P),
    ):
        value = kwargs.get(param)
        if value is not None:
            set_data_normalized(span, attribute, value)

    # LiteLLM-specific routing parameters (unset ones are skipped).
    for param in ("api_base", "api_version", "custom_llm_provider"):
        value = kwargs.get(param)
        if value is not None:
            set_data_normalized(span, f"gen_ai.litellm.{param}", value)
def _success_callback(
    kwargs: "Dict[str, Any]",
    completion_response: "Any",
    start_time: "datetime",
    end_time: "datetime",
) -> None:
    """Handle successful completion.

    Records response model, response text (PII permitting) and token usage on
    the span started by ``_input_callback``, then closes that span — except
    for intermediate streaming chunks, which fire this callback repeatedly.
    """
    # Span was stashed by _input_callback; without it there is nothing to do.
    span = _get_metadata_dict(kwargs).get("_sentry_span")
    if span is None:
        return
    integration = sentry_sdk.get_client().get_integration(LiteLLMIntegration)
    if integration is None:
        return
    try:
        # Record model information
        if hasattr(completion_response, "model"):
            set_data_normalized(
                span, SPANDATA.GEN_AI_RESPONSE_MODEL, completion_response.model
            )
        # Record response content if allowed
        if should_send_default_pii() and integration.include_prompts:
            if hasattr(completion_response, "choices"):
                response_messages = []
                for choice in completion_response.choices:
                    if hasattr(choice, "message"):
                        # Prefer pydantic v2 serialization, then v1, then a
                        # hand-rolled dict of the common message attributes.
                        if hasattr(choice.message, "model_dump"):
                            response_messages.append(choice.message.model_dump())
                        elif hasattr(choice.message, "dict"):
                            response_messages.append(choice.message.dict())
                        else:
                            # Fallback for basic message objects
                            msg = {}
                            if hasattr(choice.message, "role"):
                                msg["role"] = choice.message.role
                            if hasattr(choice.message, "content"):
                                msg["content"] = choice.message.content
                            if hasattr(choice.message, "tool_calls"):
                                msg["tool_calls"] = choice.message.tool_calls
                            response_messages.append(msg)
                if response_messages:
                    set_data_normalized(
                        span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_messages
                    )
        # Record token usage
        if hasattr(completion_response, "usage"):
            usage = completion_response.usage
            record_token_usage(
                span,
                input_tokens=getattr(usage, "prompt_tokens", None),
                output_tokens=getattr(usage, "completion_tokens", None),
                total_tokens=getattr(usage, "total_tokens", None),
            )
    finally:
        is_streaming = kwargs.get("stream")
        # Callback is fired multiple times when streaming a response; only
        # close the span on a non-streaming call or the final streamed chunk.
        # Streaming flag checked at https://github.com/BerriAI/litellm/blob/33c3f13443eaf990ac8c6e3da78bddbc2b7d0e7a/litellm/litellm_core_utils/litellm_logging.py#L1603
        if is_streaming is not True or "complete_streaming_response" in kwargs:
            span.__exit__(None, None, None)
def _failure_callback(
    kwargs: "Dict[str, Any]",
    exception: Exception,
    start_time: "datetime",
    end_time: "datetime",
) -> None:
    """Report a failed LiteLLM call to Sentry and close its span."""
    # The span was stashed by _input_callback; nothing to do without it.
    span = _get_metadata_dict(kwargs).get("_sentry_span")
    if span is None:
        return
    try:
        client = sentry_sdk.get_client()
        event, hint = event_from_exception(
            exception,
            client_options=client.options,
            mechanism={"type": "litellm", "handled": False},
        )
        sentry_sdk.capture_event(event, hint=hint)
    finally:
        # Close the span even if building or capturing the event raised.
        span.__exit__(type(exception), exception, None)
class LiteLLMIntegration(Integration):
    """
    LiteLLM integration for Sentry.

    This integration automatically captures LiteLLM API calls and sends them to Sentry
    for monitoring and error tracking. It supports all 100+ LLM providers that LiteLLM
    supports, including OpenAI, Anthropic, Google, Cohere, and many others.

    Features:
    - Automatic exception capture for all LiteLLM calls
    - Token usage tracking across all providers
    - Provider detection and attribution
    - Input/output message capture (configurable)
    - Streaming response support
    - Cost tracking integration

    Usage:
        ```python
        import litellm
        import sentry_sdk

        # Initialize Sentry with the LiteLLM integration
        sentry_sdk.init(
            dsn="your-dsn",
            send_default_pii=True,
            integrations=[
                sentry_sdk.integrations.LiteLLMIntegration(
                    include_prompts=True  # Set to False to exclude message content
                )
            ]
        )

        # All LiteLLM calls will now be monitored
        response = litellm.completion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        ```

    Configuration:
    - include_prompts (bool): Whether to include prompts and responses in spans.
      Defaults to True. Set to False to exclude potentially sensitive data.
    """

    identifier = "litellm"
    origin = f"auto.ai.{identifier}"

    def __init__(self: "LiteLLMIntegration", include_prompts: bool = True) -> None:
        # Whether prompt/response content may be attached to spans (also gated
        # by send_default_pii at capture time).
        self.include_prompts = include_prompts

    @staticmethod
    def setup_once() -> None:
        """Set up LiteLLM callbacks for monitoring (idempotent)."""
        # Read the *current* callback lists off the litellm module instead of
        # the snapshots imported when this module was loaded: users may have
        # registered their own callbacks on litellm between importing this
        # integration and calling sentry_sdk.init(), and assigning the stale
        # snapshots back would silently drop those callbacks.
        for attr, sentry_callback in (
            ("input_callback", _input_callback),
            ("success_callback", _success_callback),
            ("failure_callback", _failure_callback),
        ):
            callbacks = getattr(litellm, attr, None) or []
            if sentry_callback not in callbacks:
                callbacks.append(sentry_callback)
            setattr(litellm, attr, callbacks)