@@ -16,6 +16,7 @@
 from cot_reflection import cot_reflection
 from plansearch import plansearch
 from leap import leap
+from agent import agent_approach
 
 # Setup logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -98,6 +99,8 @@ def proxy():
             final_response = plansearch(system_prompt, initial_query, client, model, n=n)
         elif approach == 'leap':
             final_response = leap(system_prompt, initial_query, client, model)
+        elif approach == 'agent':
+            final_response = agent_approach(system_prompt, initial_query, client, model, max_attempts=3)
         else:
             raise ValueError(f"Unknown approach: {approach}")
     except Exception as e:
@@ -135,7 +138,7 @@ def proxy():
 def main():
     parser = argparse.ArgumentParser(description="Run LLM inference with various approaches.")
     parser.add_argument("--approach", type=str, choices=["auto", "mcts", "bon", "moa", "rto", "z3", "self_consistency", "pvg", "rstar",
-                                                         "cot_reflection", "plansearch", "leap"], default="auto", help="Inference approach to use")
+                                                         "cot_reflection", "plansearch", "leap", "agent"], default="auto", help="Inference approach to use")
     parser.add_argument("--simulations", type=int, default=2, help="Number of MCTS simulations")
     parser.add_argument("--exploration", type=float, default=0.2, help="Exploration weight for MCTS")
     parser.add_argument("--depth", type=int, default=1, help="Simulation depth for MCTS")
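
The new `agent` module itself is not included in this diff, only the import and the dispatch call. As a rough illustration of what a function matching the call site above could look like, here is a minimal sketch assuming an OpenAI-compatible `client` (as the other approaches receive) and a simple generate → critique → revise loop bounded by `max_attempts`; the loop structure and prompts are assumptions, not the actual contents of the `agent` module.

```python
# Hypothetical sketch of agent_approach; the real agent module may differ.
def agent_approach(system_prompt, initial_query, client, model, max_attempts=3):
    answer, feedback = None, None
    for _ in range(max_attempts):
        # Draft an answer on the first pass, or revise it using the reviewer feedback.
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": initial_query},
        ]
        if answer is not None:
            messages += [
                {"role": "assistant", "content": answer},
                {"role": "user", "content": f"Reviewer feedback on your answer:\n{feedback}\nPlease revise the answer to address it."},
            ]
        answer = client.chat.completions.create(
            model=model, messages=messages
        ).choices[0].message.content

        # Ask the model to review its own answer; stop early if it reports no issues.
        feedback = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": "You are a strict reviewer. Reply with just OK if the answer fully addresses the query; otherwise list the problems."},
                {"role": "user", "content": f"Query: {initial_query}\n\nAnswer: {answer}"},
            ],
        ).choices[0].message.content
        if feedback.strip().upper().startswith("OK"):
            break
    return answer
```

With this shape, the `max_attempts=3` argument at the call site simply caps how many revise cycles the proxy will pay for before returning the latest draft.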
|