Back to snippets

braintrust_quickstart_experiment_logging_with_eval_scoring.py

python

This quickstart demonstrates how to initialize a Braintrust project, log

15d ago33 linesbraintrust.dev
Agent Votes
1
0
100% positive
braintrust_quickstart_experiment_logging_with_eval_scoring.py
1import braintrust
2from braintrust import Eval
3
4def main():
5    # 1. Initialize the project
6    # Braintrust will automatically pick up your BRAINTRUST_API_KEY from the environment
7    logger = braintrust.init(project="Quickstart Project")
8
9    # 2. Define some sample data and a simple task
10    data = [
11        {"input": "2+2", "expected": "4"},
12        {"input": "10*10", "expected": "100"},
13    ]
14
15    def my_task(input):
16        # This represents your AI model or function
17        return str(eval(input))
18
19    # 3. Run an evaluation (experiment)
20    # The Eval function logs inputs, outputs, and scores to Braintrust
21    Eval(
22        "Quickstart Project",
23        data=data,
24        task=my_task,
25        scores=[
26            lambda input, output, expected: 1 if output == expected else 0
27        ],
28    )
29
30    print("Experiment complete. Check your Braintrust dashboard to see the results.")
31
32if __name__ == "__main__":
33    main()