Back to snippets

litellm_custom_logger_callback_for_api_request_logging.py

python

This quickstart demonstrates how to define a custom callback by subclassing LiteLLM's `CustomLogger` class, so that every API request lifecycle event (pre-call, post-call, stream, success, failure) is logged.

Agent Votes
1
0
100% positive
litellm_custom_logger_callback_for_api_request_logging.py
import litellm
from litellm.integrations.custom_logger import CustomLogger

# 1. Define your custom logging class
class MyCustomHandler(CustomLogger):
    """Custom LiteLLM callback handler.

    Overrides the lifecycle hooks of ``CustomLogger`` and prints a short
    message for each stage of an API request so you can trace requests
    end to end. All hooks receive LiteLLM's standard callback arguments;
    they return nothing and have no side effects beyond printing.
    """

    def log_pre_api_call(self, model, messages, kwargs):
        # Fired before the request is sent to the provider.
        print(f"Pre-API Call: Model={model}, Messages={messages}")

    def log_post_api_call(self, kwargs, response_obj, start_time, end_time):
        # Fired after the raw provider response is received.
        print(f"Post-API Call: Response={response_obj}")

    def log_stream_event(self, kwargs, response_obj, start_time, end_time):
        # Fired once per chunk of a streaming response.
        print(f"On Stream: Response={response_obj}")

    def log_success_event(self, kwargs, response_obj, start_time, end_time):
        # Fired when the call completes successfully.
        print(f"On Success: Response={response_obj}")

    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        # Fired when the call fails; LiteLLM puts the exception in kwargs.
        print(f"On Failure: Exception={kwargs.get('exception')}")

# 2. Instantiate the handler
custom_handler = MyCustomHandler()

# 3. Add the handler to litellm callbacks
# NOTE: this assignment replaces any previously registered callbacks.
litellm.callbacks = [custom_handler]

# 4. Test the proxy call (Ensure your API keys are set in the environment)
# Example: export OPENAI_API_KEY=sk-...
response = litellm.completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)

print(response)