Commit b9b7f95

Merge pull request #25 from codelion/feat-implement-reread
Feat implement reread
2 parents a677c15 + c99ca9c commit b9b7f95

File tree: 3 files changed (+49, -1 lines)


README.md

Lines changed: 2 additions & 0 deletions

@@ -125,6 +125,7 @@ or your own code where you want to use the results from optillm. You can use it
 | CoT with Reflection | `cot_reflection` | Implements chain-of-thought reasoning with \<thinking\>, \<reflection\> and \<output\> sections |
 | PlanSearch | `plansearch` | Implements a search algorithm over candidate plans for solving a problem in natural language |
 | LEAP | `leap` | Learns task-specific principles from few shot examples |
+| ReRead | `re2` | Implements rereading to improve reasoning by processing queries twice |
 
 ## Available Parameters
 
@@ -194,6 +195,7 @@ Authorization: Bearer your_secret_api_key
 
 ## References
 
+- [Re-Reading Improves Reasoning in Large Language Models](https://arxiv.org/abs/2309.06275)
 - [In-Context Principle Learning from Mistakes](https://arxiv.org/abs/2402.05403)
 - [Planning In Natural Language Improves LLM Search For Code Generation](https://arxiv.org/abs/2409.03733)
 - [Self-Consistency Improves Chain of Thought Reasoning in Language Models](https://arxiv.org/abs/2203.11171)
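
To try the new approach against a running proxy, a minimal client-side sketch follows. It assumes the proxy is listening locally on port 8000, exposes an OpenAI-compatible /v1 endpoint, and selects the technique by prefixing the approach slug to the model name; none of these details appear in this diff, so treat them as assumptions and adjust them to your deployment.

# Sketch only: the base URL, port, and "re2-" model-name prefix are assumptions
# about how the proxy is deployed and configured, not facts from this commit.
from openai import OpenAI

client = OpenAI(
    api_key="your_secret_api_key",        # only needed if the optional proxy API key is configured
    base_url="http://localhost:8000/v1",  # assumed local proxy address
)

response = client.chat.completions.create(
    model="re2-gpt-4o-mini",              # assumed convention: approach slug prefixed to the model name
    messages=[
        {"role": "user", "content": "If 3 pencils cost 45 cents, how much do 7 pencils cost?"},
    ],
)
print(response.choices[0].message.content)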

optillm.py

Lines changed: 4 additions & 1 deletion

@@ -17,6 +17,7 @@
 from optillm.cot_reflection import cot_reflection
 from optillm.plansearch import plansearch
 from optillm.leap import leap
+from optillm.reread import re2_approach
 
 # Setup logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -69,7 +70,7 @@
 
 # List of known approaches
 known_approaches = ["mcts", "bon", "moa", "rto", "z3", "self_consistency", "pvg", "rstar",
-                    "cot_reflection", "plansearch", "leap"]
+                    "cot_reflection", "plansearch", "leap", "re2"]
 
 # Optional API key configuration to secure the proxy
 @app.before_request
@@ -149,6 +150,8 @@ def proxy():
             final_response = plansearch(system_prompt, initial_query, client, model, n=n)
         elif approach == 'leap':
             final_response = leap(system_prompt, initial_query, client, model)
+        elif approach == 're2':
+            final_response = re2_approach(system_prompt, initial_query, client, model, n=n)
         else:
             raise ValueError(f"Unknown approach: {approach}")
     except Exception as e:
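
For orientation, the hunk above only registers `re2` and dispatches to `re2_approach`; how the `approach` string is extracted from an incoming request is outside this commit. The sketch below is a hypothetical illustration of one common routing scheme (an approach slug prefixed to the model name) and is not code from optillm.py.

# Hypothetical routing illustration only; optillm's real parsing is not shown in this diff.
known_approaches = ["mcts", "bon", "moa", "rto", "z3", "self_consistency", "pvg", "rstar",
                    "cot_reflection", "plansearch", "leap", "re2"]

def split_approach(requested_model):
    """Split "re2-gpt-4o-mini" into ("re2", "gpt-4o-mini") if it carries a known prefix."""
    for slug in known_approaches:
        if requested_model.startswith(slug + "-"):
            return slug, requested_model[len(slug) + 1:]
    return None, requested_model

print(split_approach("re2-gpt-4o-mini"))  # ('re2', 'gpt-4o-mini')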

optillm/reread.py

Lines changed: 43 additions & 0 deletions

@@ -0,0 +1,43 @@
+import logging
+
+logger = logging.getLogger(__name__)
+
+def re2_approach(system_prompt, initial_query, client, model, n=1):
+    """
+    Implement the RE2 (Re-Reading) approach for improved reasoning in LLMs.
+
+    Args:
+        system_prompt (str): The system prompt to be used.
+        initial_query (str): The initial user query.
+        client: The OpenAI client object.
+        model (str): The name of the model to use.
+        n (int): Number of completions to generate.
+
+    Returns:
+        str or list: The generated response(s) from the model.
+    """
+    logger.info("Using RE2 approach for query processing")
+
+    # Construct the RE2 prompt
+    re2_prompt = f"{initial_query}\nRead the question again: {initial_query}"
+
+    messages = [
+        {"role": "system", "content": system_prompt},
+        {"role": "user", "content": re2_prompt}
+    ]
+
+    try:
+        response = client.chat.completions.create(
+            model=model,
+            messages=messages,
+            n=n
+        )
+
+        if n == 1:
+            return response.choices[0].message.content.strip()
+        else:
+            return [choice.message.content.strip() for choice in response.choices]
+
+    except Exception as e:
+        logger.error(f"Error in RE2 approach: {str(e)}")
+        raise
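
Outside the proxy, the helper can also be called directly; a minimal sketch, assuming an OpenAI client configured via the OPENAI_API_KEY environment variable and a placeholder model name. With the default n=1 it returns a single string; with n > 1 it returns a list of strings.

from openai import OpenAI
from optillm.reread import re2_approach

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

answer = re2_approach(
    system_prompt="You are a careful problem solver.",
    initial_query="A bat and a ball cost $1.10 in total. The bat costs $1.00 more than the ball. "
                  "How much does the ball cost?",
    client=client,
    model="gpt-4o-mini",  # placeholder model name
)
print(answer)  # str when n == 1; list of str when n > 1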
