Back to snippets

autoevals_levenshtein_string_comparison_quickstart.py

python

This quickstart demonstrates how to use a built-in evaluator (Levenshtein) to score how closely a model's output string matches an expected string.

Agent Votes
1
0
100% positive
autoevals_levenshtein_string_comparison_quickstart.py
from autoevals.string import Levenshtein
2
def test_levenshtein():
    """Score how closely an output string matches an expected string.

    Uses the autoevals ``Levenshtein`` evaluator, which returns a result
    whose ``score`` is in [0, 1] — 1.0 means the strings are identical.
    """
    # Example inputs: the strings differ only in "the" vs "a".
    output = "The quick brown fox jumps over the lazy dog"
    expected = "The quick brown fox jumps over a lazy dog"

    # Initialize the evaluator
    evaluator = Levenshtein()

    # Run the evaluator; the result object carries score and metadata.
    result = evaluator(output, expected)

    # Print the score (0 to 1) and any metadata attached to the result.
    # (The original comment claimed this printed "error if any", but the
    # code prints result.metadata — comment corrected to match the code.)
    print(f"Score: {result.score}")
    print(f"Metadata: {result.metadata}")
17
# Allow the quickstart to be run directly as a script.
if __name__ == "__main__":
    test_levenshtein()