run_evaluation.py
#!/usr/bin/env python3
"""
Kodezi Chronos Benchmark Evaluation Script

This script allows you to evaluate your own debugging models against the
Multi-Random Retrieval (MRR) benchmark used in the Chronos paper.

Usage:
    python run_evaluation.py --model gpt-4 --dataset mrr-5k
    python run_evaluation.py --model your-model --api-endpoint http://localhost:8080

For detailed usage, see benchmarks/README.md
"""
import argparse
import json
import os
from typing import Dict, List, Any


def main():
    parser = argparse.ArgumentParser(description='Evaluate debugging models against Chronos benchmarks')
    parser.add_argument('--model', required=True, help='Model name (gpt-4, claude-3, or custom)')
    parser.add_argument('--dataset', default='mrr-5k', help='Dataset to use (mrr-5k, mrr-1k-sample)')
    parser.add_argument('--api-endpoint', help='API endpoint for custom models')
    parser.add_argument('--output', default='results.json', help='Output file for results')
    args = parser.parse_args()

print(f"""
╔══════════════════════════════════════════════════════════╗
║ Kodezi Chronos Benchmark Evaluation Suite ║
║ ║
║ Evaluating: {args.model:<43} ║
║ Dataset: {args.dataset:<47} ║
║ ║
║ This evaluation will test debugging capabilities on: ║
║ • Null pointer exceptions ║
║ • Race conditions ║
║ • Memory leaks ║
║ • API misuse ║
║ • Performance bugs ║
║ • Logic errors ║
║ ║
║ Results will be compared against: ║
║ • Kodezi Chronos: 65.3% success rate ║
║ • GPT-4: 8.5% success rate ║
║ • Claude-3: 7.8% success rate ║
║ ║
╚══════════════════════════════════════════════════════════╝
""")

    # Placeholder for actual evaluation logic
    print("\n⚠️ Note: This is a demonstration script.")
    print("Full benchmark implementation will be available with the research release.")
    print("\nTo implement your own evaluation:")
    print("1. Load debugging scenarios from benchmarks/multi-random-retrieval/dataset/")
    print("2. For each scenario, attempt to fix the bug with your model")
    print("3. Validate the fix using the provided test cases")
    print("4. Calculate success rate and other metrics")
    print("\nSee benchmarks/multi-random-retrieval/MRR_IMPLEMENTATION.md for details.")


if __name__ == "__main__":
    main()