diff --git a/.gitignore b/.gitignore
index 03fd37c..2266d5a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
-current_context.txt
-current_context.config
+current_context.pickle
contexts/test.txt
src/__pycache__/
src/openaiapirc
\ No newline at end of file
diff --git a/contexts/bash-context.txt b/contexts/bash-context.txt
deleted file mode 100644
index 1e83029..0000000
--- a/contexts/bash-context.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-## engine: code-cushman-001
-## temperature: 0
-## max_tokens: 300
-## shell: bash
-## multi_turn: off
-## token_count: 110
-
-# what processes are hogging the most cpu?
-ps aux | sort -nrk 3,3 | head -n 10
-
-# stop the chrome processes
-kill -9 $(ps aux | grep chrome | awk '{print $2}')
-
-# what's my IP?
-curl ifconfig.me
-
-# what's the weather in San Francisco?
-curl wttr.in/SanFrancisco
-
-# make a directory called web-app
-mkdir web-app
-
-# add an html, css, and js file to it
-touch web-app/index.html
-touch web-app/style.css
-touch web-app/script.js
-
-# add a hello world website to the index
-echo '
-Hello World!
-' > web-app/index.html
-
-# open it with vi
-vi web-app/index.html
-
-# what's running on port 1018?
-lsof -i :1018
-
-# kill process 1584
-kill -9 1584
-
-# what other devices are on my network?
-arp -a
-
-# how much storage is left?
-df -h
-
diff --git a/contexts/bash-context.yaml b/contexts/bash-context.yaml
new file mode 100644
index 0000000..c39368b
--- /dev/null
+++ b/contexts/bash-context.yaml
@@ -0,0 +1,41 @@
+type: code-engine
+config:
+ model-config:
+ engine: "code-cushman-001"
+ temperature: 0
+ max_tokens: 1024
+ shell: "bash"
+ multi_turn: "off"
+ token_count: 110
+ comment-operator: "#"
+ description-comment-operator: "##"
+description: "This converts Natural Language Commands to Command Line Commands. Here are some examples"
+examples:
+ - input: "what processes are hogging the most cpu?"
+ response: "ps aux | sort -nrk 3,3 | head -n 10"
+ - input: "stop the chrome processes"
+ response: "kill -9 $(ps aux | grep chrome | awk '{print $2}')"
+ - input: "what's my IP?"
+ response: "curl ifconfig.me"
+ - input: "what's the weather in San Francisco?"
+ response: "curl wttr.in/SanFrancisco"
+ - input: "make a directory called web-app"
+ response: "mkdir web-app"
+ - input: "add an html, css, and js file to it"
+ response: |
+ "touch web-app/index.html
+ touch web-app/style.css
+ touch web-app/script.js"
+ - input: "add a hello world website to the index"
+ response: "echo 'Hello World!
+      ' > web-app/index.html"
+ - input: "open it with vi"
+ response: "vi web-app/index.html"
+ - input: "what's running on port 1018?"
+ response: "lsof -i :1018"
+ - input: "kill process 1584"
+ response: "kill -9 1584"
+ - input: "what other devices are on my network?"
+ response: "arp -a"
+ - input: "how much storage is left?"
+ response: "df -h"
+flow-reset-text: "Ignore the previous examples and start afresh, from here on out, this is an unrelated conversation"
diff --git a/contexts/powershell-context.txt b/contexts/powershell-context.txt
deleted file mode 100644
index aa02f43..0000000
--- a/contexts/powershell-context.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-## engine: code-cushman-001
-## temperature: 0
-## max_tokens: 300
-## shell: powershell
-## multi_turn: off
-## token_count: 110
-
-# what processes are hogging the most cpu?
-Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10
-
-# stop the chrome processes
-Get-Process chrome | Stop-Process
-
-# what's my IP address?
-(Invoke-WebRequest -uri "http://ifconfig.me/ip").Content
-
-# what's the weather in New York?
-(Invoke-WebRequest -uri "wttr.in/NewYork").Content
-
-# make a git ignore with node modules and src in it
-"node_modules
-src" | Out-File .gitignore
-
-# open it in notepad
-notepad .gitignore
-
-# what's running on port 1018?
-Get-Process -Id (Get-NetTCPConnection -LocalPort 1018).OwningProcess
-
-# kill process 1584
-Stop-Process -Id 1584
-
-# what other devices are on my network?
-Get-NetIPAddress | Format-Table
-
-# how much storage is left on my pc?
-Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize
-
-# how many GB is 367247884288 B?
-(367247884288 / 1GB)
-
diff --git a/contexts/powershell-context.yaml b/contexts/powershell-context.yaml
new file mode 100644
index 0000000..36d2ae1
--- /dev/null
+++ b/contexts/powershell-context.yaml
@@ -0,0 +1,41 @@
+type: code-engine
+config:
+ model-config:
+ engine: "code-cushman-001"
+ temperature: 0
+ max_tokens: 1024
+ shell: "powershell"
+ multi_turn: "off"
+ token_count: 110
+ comment-operator: "#"
+ description-comment-operator: "##"
+description: "This converts Natural Language Commands to Command Line Commands. Here are some examples"
+examples:
+ - input: "what processes are hogging the most cpu?"
+ response: "Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10"
+ - input: "stop the chrome processes"
+ response: "Get-Process chrome | Stop-Process"
+ - input: "what's my IP address?"
+    response: '(Invoke-WebRequest -uri "http://ifconfig.me/ip").Content'
+ - input: "what's the weather in New York?"
+ response: '(Invoke-WebRequest -uri "wttr.in/NewYork").Content'
+ - input: "make a git ignore with node modules and src in it"
+ response: |
+ "node_modules"
+ src" | Out-File .gitignore
+ - input: "open it in notepad"
+ response: "notepad .gitignore"
+ - input: "what's running on port 1018?"
+ response: "Get-Process -Id (Get-NetTCPConnection -LocalPort 1018).OwningProcess"
+ - input: "kill process 1584"
+ response: "Stop-Process -Id 1584"
+ - input: "what other devices are on my network?"
+ response: "Get-NetIPAddress | Format-Table"
+ - input: "how much storage is left on my pc?"
+ response: "Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize"
+ - input: "how many GB is 367247884288 B?"
+ response: "(367247884288 / 1GB)"
+flow-reset-text: "Ignore the previous examples and start afresh, from here on out, this is an unrelated conversation"
+
+
+
diff --git a/contexts/powershell-voice-cognitive-service.txt b/contexts/powershell-voice-cognitive-service.txt
deleted file mode 100644
index 6204b4c..0000000
--- a/contexts/powershell-voice-cognitive-service.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-## engine: code-cushman-001
-## temperature: 0
-## max_tokens: 300
-## shell: powershell
-## multi_turn: on
-## token_count: 110
-
-# what processes are hogging the most cpu?
-Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10
-spx synthesize --text ("Here are the top 10 processes by memory used") --speakers --quiet
-
-# stop the chrome processes
-Get-Process chrome | Stop-Process
-spx synthesize --text ("I stopped chrome") --speakers --quiet
-
-# make a readme with a project header and two sections
-"# Project Header
-## Section 1
-## Section 2" | Out-File "./readme.md"
-spx synthesize --text ("I created your readme!") --speakers --quiet
-
-# what's running on port 3000?
-Get-Process -Id (Get-NetTCPConnection -LocalPort 3000).OwningProcess
-spx synthesize --text ("I found the process running on port 3000") --speakers --quiet
-
-# kill process 1584
-Stop-Process -Id 1584
-spx synthesize --text ("I killed the process with ID 1584") --speakers --quiet
-
-# what other devices are on my network?
-Get-NetIPAddress | Format-Table
-spx synthesize --text ("Here are the IP addresses on your network") --speakers --quiet
-
-# how much storage is left on my pc?
-Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize
-spx synthesize --text ("Here is the amount of storage left on your computer") --speakers --quiet
-
-# how many GB is 367247884288 B?
-spx synthesize --text ("367247884288 B is " + (367247884288 / 1073741824) + " GB") --speakers --quiet
-
-# where do you live?
-spx synthesize --text ("I live in " + $HOME) --speakers --quiet
-
-# what's my IP address?
-$ip = (Invoke-WebRequest -Uri http://ifconfig.me/ip).Content
-spx synthesize --text ("Your IP address is " + $ip) --speakers --quiet
-
-# tell me a joke
-spx synthesize --text ("What do dog robots do? They byte!") --speakers --quiet
-
-# You: How do I combine arrays?
-spx synthesize --text ("Bot: You can use the concat() method") --speakers --quiet
-
-# tell me a joke
-spx synthesize --text ("Why do programmers always mix up Halloween and Christmas? Because Oct 31 = Dec 25") --speakers --quiet
-
-# tell me a joke
-spx synthesize --text ("The best method for accelerating a computer is the one that boosts it by 9.8 m/s^2") --speakers --quiet
-
-# what's the meaning of life?
-spx synthesize --text ("The meaning of life is 42") --speakers --quiet
-
-# where are you located
-spx synthesize --text ("I live in " + $HOME) --speakers --quiet
diff --git a/contexts/powershell-voice-cognitive-service.yaml b/contexts/powershell-voice-cognitive-service.yaml
new file mode 100644
index 0000000..72c6d41
--- /dev/null
+++ b/contexts/powershell-voice-cognitive-service.yaml
@@ -0,0 +1,73 @@
+type: code-engine
+config:
+ model-config:
+ engine: "code-cushman-001"
+ temperature: 0
+ max_tokens: 1024
+ shell: "powershell"
+ multi_turn: "off"
+ token_count: 110
+ comment-operator: "#"
+ description-comment-operator: "##"
+description: "This converts Natural Language Commands to Command Line Commands. Here are some examples"
+examples:
+ - input: "what processes are hogging the most cpu?"
+ response: |
+ Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10
+ spx synthesize --text ("Here are the top 10 processes by memory used") --speakers --quiet
+
+ - input: "stop the chrome processes"
+ response: |
+ Get-Process chrome | Stop-Process
+ spx synthesize --text ("I stopped chrome") --speakers --quiet
+
+ - input: "what's running on port 3000?"
+ response: |
+ Get-Process -Id (Get-NetTCPConnection -LocalPort 3000).OwningProcess
+ spx synthesize --text ("I found the process running on port 3000") --speakers --quiet
+
+ - input: "kill process 1584"
+ response: |
+ Stop-Process -Id 1584
+ spx synthesize --text ("I killed the process with ID 1584") --speakers --quiet
+
+ - input: "what other devices are on my network?"
+ response: |
+ Get-NetIPAddress | Format-Table
+ spx synthesize --text ("Here are the IP addresses on your network") --speakers --quiet
+
+ - input: "how much storage is left on my pc?"
+ response: |
+ Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize
+ spx synthesize --text ("Here is the amount of storage left on your computer") --speakers --quiet
+
+ - input: "how many GB is 367247884288 B?"
+ response: |
+ spx synthesize --text ("367247884288 B is " + (367247884288 / 1073741824) + " GB") --speakers --quiet
+ - input: "where do you live?"
+ response: |
+ spx synthesize --text ("I live in " + $HOME) --speakers --quiet
+ - input: "what's my IP address?"
+ response: |
+ $ip = (Invoke-WebRequest -Uri http://ifconfig.me/ip).Content
+ spx synthesize --text ("Your IP address is " + $ip) --speakers --quiet
+
+ - input: "tell me a joke"
+ response: |
+ spx synthesize --text ("What do dog robots do? They byte!") --speakers --quiet
+ - input: "You: How do I combine arrays?"
+ response: |
+ spx synthesize --text ("Bot: You can use the concat() method") --speakers --quiet
+ - input: "tell me a joke"
+ response: |
+ spx synthesize --text ("Why do programmers always mix up Halloween and Christmas? Because Oct 31 = Dec 25") --speakers --quiet
+ - input: "tell me a joke"
+ response: |
+ spx synthesize --text ("The best method for accelerating a computer is the one that boosts it by 9.8 m/s^2") --speakers --quiet
+ - input: "what's the meaning of life?"
+ response: |
+ spx synthesize --text ("The meaning of life is 42") --speakers --quiet
+ - input: "where are you located"
+ response: |
+ spx synthesize --text ("I live in " + $HOME) --speakers --quiet
+flow-reset-text: "Ignore the previous examples and start afresh, from here on out, this is an unrelated conversation"
\ No newline at end of file
diff --git a/contexts/powershell-voice.txt b/contexts/powershell-voice.txt
deleted file mode 100644
index dccd84b..0000000
--- a/contexts/powershell-voice.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-## engine: code-cushman-001
-## temperature: 0
-## max_tokens: 300
-## shell: powershell
-## multi_turn: on
-## token_count: 110
-
-# what processes are hogging the most cpu?
-Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10
-
-# stop the chrome processes
-Get-Process chrome | Stop-Process
-
-# make a readme with a project header and two sections
-"# Project Header
-## Section 1
-## Section 2" | Out-File "./readme2.md"
-
-# what's running on port 3000?
-Get-Process -Id (Get-NetTCPConnection -LocalPort 3000).OwningProcess
-
-# kill process 1584
-Stop-Process -Id 1584
-
-# what other devices are on my network?
-Get-NetIPAddress | Format-Table
-
-# how much storage is left on my pc?
-Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize
-
-# how many GB is 367247884288 B?
-(367247884288 / 1GB)
-
-# what's the meaning of life?
-(new-object -ComObject SAPI.SPVoice).Speak("The meaning of life is 42")
-
-# where are you located
-(new-object -ComObject SAPI.SPVoice).Speak("I live in " + $HOME)
-
diff --git a/contexts/powershell-voice.yaml b/contexts/powershell-voice.yaml
new file mode 100644
index 0000000..e8c9eeb
--- /dev/null
+++ b/contexts/powershell-voice.yaml
@@ -0,0 +1,37 @@
+type: code-engine
+config:
+ model-config:
+ engine: "code-cushman-001"
+ temperature: 0
+ max_tokens: 1024
+ shell: "powershell"
+ multi_turn: "off"
+ token_count: 110
+ comment-operator: "#"
+ description-comment-operator: "##"
+description: "This converts Natural Language Commands to Command Line Commands. Here are some examples"
+examples:
+ - input: "what processes are hogging the most cpu?"
+ response: "Get-Process | Sort-Object -Property CPU -Descending | Select-Object -First 10"
+ - input: "stop the chrome processes"
+ response: "Get-Process chrome | Stop-Process"
+ - input: "make a readme with a project header and two sections"
+ response: |
+      "# Project Header
+ ## Section 1
+ ## Section 2" | Out-File "./readme2.md"
+ - input: "what's running on port 3000?"
+ response: "Get-Process -Id (Get-NetTCPConnection -LocalPort 3000).OwningProcess"
+ - input: "kill process 1584"
+ response: "Stop-Process -Id 1584"
+ - input: "what other devices are on my network?"
+ response: "Get-NetIPAddress | Format-Table"
+ - input: "how much storage is left on my pc?"
+ response: "Get-WmiObject -Class Win32_LogicalDisk | Select-Object -Property DeviceID,FreeSpace,Size,DriveType | Format-Table -AutoSize"
+ - input: "how many GB is 367247884288 B?"
+ response: "(367247884288 / 1GB)"
+ - input: "what's the meaning of life?"
+ response: '(new-object -ComObject SAPI.SPVoice).Speak("The meaning of life is 42")'
+ - input: "where are you located"
+ response: '(new-object -ComObject SAPI.SPVoice).Speak("I live in " + $HOME)'
+flow-reset-text: "Ignore the previous examples and start afresh, from here on out, this is an unrelated conversation"
diff --git a/contexts/zsh-context.txt b/contexts/zsh-context.txt
deleted file mode 100644
index 6128e8a..0000000
--- a/contexts/zsh-context.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-## engine: code-cushman-001
-## temperature: 0
-## max_tokens: 300
-## shell: zsh
-## multi_turn: off
-## token_count: 110
-
-# what processes are hogging the most cpu?
-ps aux | sort -nrk 3,3 | head -n 10
-
-# stop the chrome processes
-kill -9 $(ps aux | grep chrome | awk '{print $2}')
-
-# what's my IP?
-curl ifconfig.me
-
-# what's the weather in San Francisco?
-curl wttr.in/SanFrancisco
-
-# make a directory called web-app
-mkdir web-app
-
-# add an html, css, and js file to it
-touch web-app/index.html
-touch web-app/style.css
-touch web-app/script.js
-
-# add a hello world website to the index
-echo 'Hello World!
-' > web-app/index.html
-
-# open it with vi
-vi web-app/index.html
-
-# what's running on port 1018?
-lsof -i :1018
-
-# kill process 1584
-kill -9 1584
-
-# what other devices are on my network?
-arp -a
-
-# how much storage is left?
-df -h
-
diff --git a/contexts/zsh-context.yaml b/contexts/zsh-context.yaml
new file mode 100644
index 0000000..3bab4ac
--- /dev/null
+++ b/contexts/zsh-context.yaml
@@ -0,0 +1,41 @@
+type: code-engine
+config:
+ model-config:
+ engine: "code-cushman-001"
+ temperature: 0
+ max_tokens: 1024
+ shell: "zsh"
+ multi_turn: "off"
+ token_count: 110
+ comment-operator: "#"
+ description-comment-operator: "##"
+description: "This converts Natural Language Commands to Command Line Commands. Here are some examples"
+examples:
+ - input: "what processes are hogging the most cpu?"
+ response: "ps aux | sort -nrk 3,3 | head -n 10"
+ - input: "stop the chrome processes"
+ response: "kill -9 $(ps aux | grep chrome | awk '{print $2}')"
+ - input: "what's my IP?"
+ response: "curl ifconfig.me"
+ - input: "what's the weather in San Francisco?"
+ response: "curl wttr.in/SanFrancisco"
+ - input: "make a directory called web-app"
+ response: "mkdir web-app"
+ - input: "add an html, css, and js file to it"
+ response: |
+ touch web-app/index.html
+ touch web-app/style.css
+ touch web-app/script.js
+ - input: "add a hello world website to the index"
+ response: "echo 'Hello World!
+      ' > web-app/index.html"
+ - input: "open it with vi"
+ response: "vi web-app/index.html"
+ - input: "what's running on port 1018?"
+ response: "lsof -i :1018"
+ - input: "kill process 1584"
+ response: "kill -9 1584"
+ - input: "what other devices are on my network?"
+ response: "arp -a"
+ - input: "how much storage is left?"
+ response: "df -h"
+flow-reset-text: "Ignore the previous examples and start afresh, from here on out, this is an unrelated conversation"
diff --git a/scripts/zsh_setup.sh b/scripts/zsh_setup.sh
index cde3cd8..34ec693 100755
--- a/scripts/zsh_setup.sh
+++ b/scripts/zsh_setup.sh
@@ -42,6 +42,7 @@ validateSettings()
# Append settings and CTRL-g binding in .zshrc
configureZsh()
{
+ touch $zshrcPath
# Remove previous settings
sed -i '' '/### Codex CLI setup - start/,/### Codex CLI setup - end/d' $zshrcPath
echo "Removed previous settings in $zshrcPath if present"
diff --git a/src/codex_query.py b/src/codex_query.py
index ade2a9a..a3db093 100755
--- a/src/codex_query.py
+++ b/src/codex_query.py
@@ -9,7 +9,7 @@
import psutil
from pathlib import Path
-from prompt_file import PromptFile
+from prompt_file import ModelConfig, Prompt
from commands import get_command_result
MULTI_TURN = "off"
@@ -24,9 +24,6 @@
# api keys located in the same directory as this file
API_KEYS_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'openaiapirc')
-PROMPT_CONTEXT = Path(__file__).with_name('current_context.txt')
-
-
# Read the secret_key from the ini file ~/.config/openaiapirc
# The format is:
# [openai]
@@ -46,11 +43,10 @@ def create_template_ini_file():
print('# engine=')
sys.exit(1)
-def initialize():
+def initialize(shell: str):
"""
Initialize openAI and shell mode
"""
- global ENGINE
# Check if file at API_KEYS_LOCATION exists
create_template_ini_file()
@@ -59,18 +55,9 @@ def initialize():
openai.api_key = config['openai']['secret_key'].strip('"').strip("'")
openai.organization = config['openai']['organization_id'].strip('"').strip("'")
- ENGINE = config['openai']['engine'].strip('"').strip("'")
-
- prompt_config = {
- 'engine': ENGINE,
- 'temperature': TEMPERATURE,
- 'max_tokens': MAX_TOKENS,
- 'shell': SHELL,
- 'multi_turn': MULTI_TURN,
- 'token_count': 0
- }
+ engine = config['openai']['engine'].strip('"').strip("'")
- return PromptFile(PROMPT_CONTEXT.name, prompt_config)
+ return Prompt(shell, engine)
def is_sensitive_content(content):
"""
@@ -130,7 +117,7 @@ def is_sensitive_content(content):
return (output_label != "0")
-def get_query(prompt_file):
+def get_query(prompt_generator):
"""
uses the stdin to get user input
input is either treated as a command or as a Codex query
@@ -140,68 +127,62 @@ def get_query(prompt_file):
# get input from terminal or stdin
if DEBUG_MODE:
- entry = input("prompt: ") + '\n'
+ entry = input("prompt: ")
else:
- entry = sys.stdin.read()
+ # Remove extreaneous newlines and hashtag from the input string
+ entry = sys.stdin.read().strip("\n").strip("#").strip(" ")
# first we check if the input is a command
- command_result, prompt_file = get_command_result(entry, prompt_file)
+ try:
+ command_result, prompt_generator = get_command_result(entry, prompt_generator)
+ except Exception as e:
+ print (str(e))
# if input is not a command, then query Codex, otherwise exit command has been run successfully
if command_result == "":
- return entry, prompt_file
+ return entry, prompt_generator
else:
sys.exit(0)
-def detect_shell():
- global SHELL
- global PROMPT_CONTEXT
+def get_shell_prefix(shell: str):
+ # prime codex for the corresponding shell type
+ if shell == "zsh":
+ return '#!/bin/zsh\n\n'
+ elif shell == "bash":
+ return '#!/bin/bash\n\n'
+ elif shell == "powershell":
+ return '<# powershell #>\n\n'
+ elif shell == "unknown":
+ print("\n#\tUnsupported shell type, please use # set shell ")
+ return ""
+ else:
+ return '#' + shell + '\n\n'
+
+def detect_shell():
parent_process_name = psutil.Process(os.getppid()).name()
POWERSHELL_MODE = bool(re.fullmatch('pwsh|pwsh.exe|powershell.exe', parent_process_name))
BASH_MODE = bool(re.fullmatch('bash|bash.exe', parent_process_name))
ZSH_MODE = bool(re.fullmatch('zsh|zsh.exe', parent_process_name))
- SHELL = "powershell" if POWERSHELL_MODE else "bash" if BASH_MODE else "zsh" if ZSH_MODE else "unknown"
+ shell = "powershell" if POWERSHELL_MODE else "bash" if BASH_MODE else "zsh" if ZSH_MODE else "unknown"
- shell_prompt_file = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", "{}-context.txt".format(SHELL)))
+ prefix = get_shell_prefix(shell)
- if shell_prompt_file.is_file():
- PROMPT_CONTEXT = shell_prompt_file
+ return shell, prefix
if __name__ == '__main__':
- detect_shell()
- prompt_file = initialize()
+ shell, prefix = detect_shell()
+ prompt_generator: Prompt = initialize(shell)
try:
- user_query, prompt_file = get_query(prompt_file)
-
- config = prompt_file.config if prompt_file else {
- 'engine': ENGINE,
- 'temperature': TEMPERATURE,
- 'max_tokens': MAX_TOKENS,
- 'shell': SHELL,
- 'multi_turn': MULTI_TURN,
- 'token_count': 0
- }
-
- # use query prefix to prime Codex for correct scripting language
- prefix = ""
- # prime codex for the corresponding shell type
- if config['shell'] == "zsh":
- prefix = '#!/bin/zsh\n\n'
- elif config['shell'] == "bash":
- prefix = '#!/bin/bash\n\n'
- elif config['shell'] == "powershell":
- prefix = '<# powershell #>\n\n'
- elif config['shell'] == "unknown":
- print("\n#\tUnsupported shell type, please use # set shell ")
- else:
- prefix = '#' + config['shell'] + '\n\n'
-
- codex_query = prefix + prompt_file.read_prompt_file(user_query) + user_query
-
+ user_query, prompt_generator = get_query(prompt_generator)
+ codex_query = prefix + prompt_generator.prompt_engine.build_prompt(user_query, prompt_generator.prompt_engine.config.model_config.multi_turn)
# get the response from codex
- response = openai.Completion.create(engine=config['engine'], prompt=codex_query, temperature=config['temperature'], max_tokens=config['max_tokens'], stop="#")
+ response = openai.Completion.create(engine=prompt_generator.prompt_engine.config.model_config.engine,
+ prompt=codex_query,
+ temperature=prompt_generator.prompt_engine.config.model_config.temperature,
+ max_tokens=prompt_generator.prompt_engine.config.model_config.max_tokens,
+ stop=prompt_generator.prompt_engine.config.input_prefix)
completion_all = response['choices'][0]['text']
@@ -211,9 +192,10 @@ def detect_shell():
print(completion_all)
# append output to prompt context file
- if config['multi_turn'] == "on":
+ if prompt_generator.prompt_engine.config.model_config.multi_turn == "on":
if completion_all != "" or len(completion_all) > 0:
- prompt_file.add_input_output_pair(user_query, completion_all)
+ prompt_generator.prompt_engine.add_interaction(user_query, completion_all)
+ prompt_generator.save_prompt_engine(prompt_generator.prompt_engine)
except FileNotFoundError:
print('\n\n# Codex CLI error: Prompt file not found, try again')
diff --git a/src/commands.py b/src/commands.py
index 4d0bde3..5235abd 100644
--- a/src/commands.py
+++ b/src/commands.py
@@ -4,7 +4,17 @@
from pathlib import Path
from prompt_file import *
-def get_command_result(input, prompt_file):
+import re
+
+param_type_mapping = {
+ 'engine': str,
+ 'temperature': float,
+ 'max_tokens': int,
+ 'shell': str,
+}
+
+
+def get_command_result(input, prompt_generator):
"""
Checks if the input is a command and if so, executes it
Currently supported commands:
@@ -23,131 +33,116 @@ def get_command_result(input, prompt_file):
Returns: command result or "" if no command matched
"""
- if prompt_file == None:
- return "", None
- config = prompt_file.config
+ config = prompt_generator.prompt_engine.config.model_config
# configuration setting commands
if input.__contains__("set"):
- # set temperature
- if input.__contains__("temperature"):
- input = input.split()
- if len(input) == 4:
- config['temperature'] = float(input[3])
- prompt_file.set_config(config)
- print("# Temperature set to " + str(config['temperature']))
- return "config set", prompt_file
- else:
- return "", prompt_file
- # set max_tokens
- elif input.__contains__("max_tokens"):
- input = input.split()
- if len(input) == 4:
- config['max_tokens'] = int(input[3])
- prompt_file.set_config(config)
- print("# Max tokens set to " + str(config['max_tokens']))
- return "config set", prompt_file
- else:
- return "", prompt_file
- elif input.__contains__("shell"):
- input = input.split()
- if len(input) == 4:
- config['shell'] = input[3]
- prompt_file.set_config(config)
- print("# Shell set to " + str(config['shell']))
- return "config set", prompt_file
- else:
- return "", prompt_file
- elif input.__contains__("engine"):
- input = input.split()
- if len(input) == 4:
- config['engine'] = input[3]
- prompt_file.set_config(config)
- print("# Engine set to " + str(config['engine']))
- return "config set", prompt_file
- else:
- return "", prompt_file
+ # match the command using regex to one of the 4 above set commands
+ match = re.match(r"set (engine|temperature|max_tokens|shell) (.*)", input)
+ if match:
+ # get the command and the value
+ command = match.group(1)
+ value = match.group(2)
+ # check if the value is of the correct type
+ if param_type_mapping[command] == float:
+ value = float(value)
+ elif param_type_mapping[command] == int:
+ value = int(value)
+ elif param_type_mapping[command] == str:
+ value = str(value)
+ # set the value
+ setattr(config, command, value)
+ print (f"\n# {command} set to {value}")
+ return "Configuration setting updated", prompt_generator
- if input.__contains__("show config"):
- prompt_file.show_config()
- return "config shown", prompt_file
+ elif input.__contains__("show config"):
+ prompt_generator.show_config()
+ return "config shown", prompt_generator
# multi turn/single turn commands
- if input.__contains__("multi-turn"):
+ elif input.__contains__("multi-turn"):
# start context
if input.__contains__("start"):
- if config['multi_turn'] == 'off':
- prompt_file.start_multi_turn()
- return "multi turn mode on", prompt_file
-
- return "multi turn mode on", prompt_file
-
+ if getattr(config, 'multi_turn') == 'off':
+ prompt_generator.start_multi_turn()
+ print ("\n# Multi-turn mode started")
+ return "multi turn mode on", prompt_generator
+ else:
+ print ("\n# Multi-turn mode already started")
+ return "multi turn mode already on", prompt_generator
+
# stop context
- if input.__contains__("stop"):
- prompt_file.stop_multi_turn()
- return "multi turn mode off", prompt_file
+ elif input.__contains__("stop"):
+ if getattr(config, 'multi_turn') == 'on':
+ prompt_generator.stop_multi_turn()
+ print ("\n# Multi-turn mode stopped")
+ return "multi turn mode off", prompt_generator
+ else:
+ print ("\n# Multi-turn mode already stopped")
+ return "multi turn mode already off", prompt_generator
# context file commands
- if input.__contains__("context"):
+ elif input.__contains__("context"):
if input.__contains__("default"):
- prompt_file.default_context()
- return "stopped context", prompt_file
+ prompt_generator.default_context()
+ print ("\n# Default context loaded")
+ return "stopped context", prompt_generator
# show context
if input.__contains__("show"):
print('\n')
- with open(prompt_file.file_name, 'r') as f:
- lines = f.readlines()
- lines = lines[6:] # skip headers
-
- line_numbers = 0
- if len(input.split()) > 3:
- line_numbers = int(input.split()[3])
-
- if line_numbers != 0:
- for line in lines[-line_numbers:]:
- print('\n# '+line, end='')
- else:
- print('\n# '.join(lines))
- return "context shown", prompt_file
+ print ("\n# ".join(prompt_generator.prompt_engine.build_context().split("\n")))
+ return "context shown", prompt_generator
# edit context
if input.__contains__("view"):
# open the prompt file in text editor
- if config['shell'] != 'powershell':
- os.system('open {}'.format(prompt_file.file_path))
+ prompt_generator.save_to("current-context.yaml")
+ if getattr(config, 'shell') != 'powershell':
+ os.system('open {}'.format(Path(os.path.join(os.path.dirname(__file__), "..", "contexts", f"current-context.yaml"))))
else:
- os.system('start {}'.format(prompt_file.file_path))
- return "context shown", prompt_file
+ os.system('start {}'.format(Path(os.path.join(os.path.dirname(__file__), "..", "contexts", f"current-context.yaml"))))
+ print ("\n# Context file opened in text editor")
+ return "context shown", prompt_generator
# save context
if input.__contains__("save"):
# save the current prompt file to a new file
# if filename not specified use the current time (to avoid name conflicts)
- filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
- if len(input.split()) == 4:
- filename = input.split()[3]
+ # regex to get the filename
+ match = re.match(r"save context (.*)", input)
+ if match:
+ filename = match.group(1)
+ else:
+ filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".yaml"
- prompt_file.save_to(filename)
- return "context saved", prompt_file
+ prompt_generator.save_to(filename)
+            print(f'\n# Saved to {filename}')
+
+ return "context saved", prompt_generator
# clear context
if input.__contains__("clear"):
# temporary saving deleted prompt file
- prompt_file.default_context()
- return "unlearned interaction", prompt_file
+ prompt_generator.clear_context()
+ print ("\n# Context cleared")
+ return "unlearned interaction", prompt_generator
# load context
if input.__contains__("load"):
- # the input looks like # load context
- # write everything from the file to the prompt file
- input = input.split()
- if len(input) == 4:
- filename = input[3]
- prompt_file.load_context(filename)
- return "context loaded", prompt_file
+
+ # regex to get the filename
+ match = re.match(r"load context (.*)", input)
+ if match:
+ filename = match.group(1)
+ success = prompt_generator.load_context(filename)
+ if success:
+                print (f'\n# Loaded {filename}')
+            else:
+                print (f'\n# Failed to load {filename}, please check the file')
+ return "context loaded", prompt_generator
print('\n#\tInvalid command format, did you specify which file to load?')
- return "context loaded", prompt_file
+ return "context loaded", prompt_generator
- return "", prompt_file
+ return "", prompt_generator
diff --git a/src/prompt_file.py b/src/prompt_file.py
index f86aeac..4e47527 100644
--- a/src/prompt_file.py
+++ b/src/prompt_file.py
@@ -1,262 +1,164 @@
import os
+from pyexpat import model
import time
import configparser
+import pickle
from pathlib import Path
-
+from openai import Model
+from prompt_engine.code_engine import CodeEngine, ModelConfig
API_KEYS_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'openaiapirc')
-class PromptFile:
- context_source_filename = ""
- default_context_filename = "current_context.txt"
+
+class CodexCLIConfig(ModelConfig):
+ """
+ Interaction class is used to store the model config to be used in the prompt engine
+ """
+ def __init__(self, **kwargs):
+ self.engine = kwargs['engine'] if ('engine' in kwargs and kwargs['engine']) else 'code-davinci-002'
+ self.temperature = float(kwargs['temperature']) if ('temperature' in kwargs and kwargs['temperature']) else 0
+ self.max_tokens = int(kwargs['max_tokens']) if ('max_tokens' in kwargs and kwargs['max_tokens']) else 1024
+ self.shell = kwargs['shell'] if ('shell' in kwargs and kwargs['shell']) else 'powershell'
+ self.multi_turn = kwargs['multi_turn'] if ('multi_turn' in kwargs and kwargs['multi_turn']) else 'on'
+ self.token_count = int(kwargs['token_count']) if ('token_count' in kwargs and kwargs['token_count']) else 0
+CURRENT_CONTEXT_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'current_context.pickle')
+
+class Prompt:
+ default_context_filename = "current_context.yaml"
default_file_path = os.path.join(os.path.dirname(__file__), "..", default_context_filename)
- default_config_path = os.path.join(os.path.dirname(__file__), "..", "current_context.config")
+ default_config_path = os.path.join(os.path.dirname(__file__), "..", 'current_context.pickle')
- def __init__(self, file_name, config):
- self.context_source_filename = "{}-context.txt".format(config['shell']) # feel free to set your own default context path here
+ def __init__(self, shell, engine):
- self.file_path = self.default_file_path
- self.config_path = self.default_config_path
-
- # loading in one of the saved contexts
- if file_name != self.default_context_filename:
- self.load_context(file_name, True)
-
- def has_config(self):
- """
- Check if the prompt file has a corresponding config file
- """
- return os.path.isfile(self.config_path)
+ # check if default_config file exists, otherwise create it from the default_context_filename and save it
+ if os.path.exists(self.default_config_path):
+ self.prompt_engine = self.load_prompt_engine(self.default_config_path)
+ else:
+ # TODO: Change this to the walrus operator (:=), receiving invalid syntax
+ if not os.path.exists(self.default_file_path):
+ shell_context_path = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", f"{shell}-context.yaml"))
+ self.prompt_engine = self.create_prompt_engine_from_yaml(shell_context_path)
+ self.save_prompt_engine(self.prompt_engine, self.default_config_path)
+ else:
+ temp = self.create_prompt_engine_from_yaml(self.default_file_path)
+ if temp != None:
+ self.prompt_engine = temp
+ self.save_prompt_engine(self.prompt_engine, self.default_config_path)
+ else:
+ raise Exception("Error loading prompt engine")
- def read_config(self):
- """
- Read the prompt config and return a dictionary
- """
-
- if self.has_config() == False:
- self.set_config(self.config)
- return self.config
-
- with open(self.config_path, 'r') as f:
- lines = f.readlines()
-
- config = {
- 'engine': lines[0].split(':')[1].strip(),
- 'temperature': float(lines[1].split(':')[1].strip()),
- 'max_tokens': int(lines[2].split(':')[1].strip()),
- 'shell': lines[3].split(':')[1].strip(),
- 'multi_turn': lines[4].split(':')[1].strip(),
- 'token_count': int(lines[5].split(':')[1].strip())
- }
+ def create_prompt_engine_from_yaml(self, yaml_path):
+ default_config = CodexCLIConfig()
+ prompt_engine = CodeEngine(default_config)
+ with open(yaml_path, 'r') as f:
+ yaml_config = f.read()
+ prompt_engine.load_yaml(yaml_config=yaml_config)
+ return prompt_engine
- self.config = config
- return self.config
-
- def set_config(self, config):
- """
- Set the prompt headers with the new config
- """
- self.config = config
-
- with open(self.config_path, 'w') as f:
- f.write('engine: {}\n'.format(self.config['engine']))
- f.write('temperature: {}\n'.format(self.config['temperature']))
- f.write('max_tokens: {}\n'.format(self.config['max_tokens']))
- f.write('shell: {}\n'.format(self.config['shell']))
- f.write('multi_turn: {}\n'.format(self.config['multi_turn']))
- f.write('token_count: {}\n'.format(self.config['token_count']))
-
def show_config(self):
print('\n')
# read the dictionary into a list of # lines
lines = []
- for key, value in self.config.items():
+ for key, value in self.prompt_engine.config.model_config.__dict__.items():
lines.append('# {}: {}\n'.format(key, value))
print(''.join(lines))
def add_input_output_pair(self, user_query, prompt_response):
"""
- Add lines to file_name and update the token_count
- """
-
- with open(self.file_path, 'a') as f:
- f.write(user_query)
- f.write(prompt_response)
-
- if self.config['multi_turn'] == 'on':
- self.config['token_count'] += len(user_query.split()) + len(prompt_response.split())
- self.set_config(self.config)
-
- def read_prompt_file(self, input):
+ Add an input/output pair to the prompt engine
"""
- Get the updated prompt file
- Checks for token overflow and appends the current input
-
- Returns: the prompt file after appending the input
- """
-
- input_tokens_count = len(input.split())
- need_to_refresh = (self.config['token_count'] + input_tokens_count > 2048)
- if need_to_refresh:
- # delete first 2 lines of prompt context file
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
- prompt = lines[2:] # drop first 2 lines of prompt
- with open(self.file_path, 'w') as f:
- f.writelines(prompt)
-
- # get input from prompt file
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
-
- return ''.join(lines)
-
- def get_token_count(self):
- """
- Get the actual token count
- """
- token_count = 0
- if self.has_config():
- with open(self.config_path, 'r') as f:
- lines = f.readlines()
- token_count = int(lines[5].split(':')[1].strip())
-
- true_token_count = 0
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
- # count the number of words in the prompt file
- for line in lines:
- true_token_count += len(line.split())
-
- if true_token_count != token_count:
- self.config['token_count'] = true_token_count
- self.set_config(self.config)
-
- return true_token_count
+ self.prompt_engine.add_interaction(user_query, prompt_response)
def clear(self):
"""
Clear the prompt file, while keeping the config
Note: saves a copy to the deleted folder
"""
- config = self.read_config()
- filename = time.strftime("%Y-%m-%d_%H-%M-%S") + ".txt"
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
- filename = os.path.join(os.path.dirname(__file__), "..", "deleted", filename)
- with Path(filename).open('w') as f:
- f.writelines(lines)
-
- # delete the prompt file
- with open(self.file_path, 'w') as f:
- f.write('')
-
- print("\n# Context has been cleared, temporarily saved to {}".format(filename))
- self.set_config(config)
+ self.prompt_engine.reset_context()
def clear_last_interaction(self):
"""
Clear the last interaction from the prompt file
"""
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
- if len(lines) > 1:
- lines.pop()
- lines.pop()
- with open(self.file_path, 'w') as f:
- f.writelines(lines)
- print("\n# Unlearned interaction")
-
- def save_to(self, save_name):
- """
- Save the prompt file to a new location with the config
- """
- if not save_name.endswith('.txt'):
- save_name = save_name + '.txt'
- save_path = os.path.join(os.path.dirname(__file__), "..", "contexts", save_name)
-
- # first write the config
- with open(self.config_path, 'r') as f:
- lines = f.readlines()
- lines = ['## ' + line for line in lines]
- with Path(save_path).open('w') as f:
- f.writelines(lines)
-
- # then write the prompt file
- with open(self.file_path, 'r') as f:
- lines = f.readlines()
- with Path(save_path).open('a') as f:
- f.writelines(lines)
-
- print('\n# Context saved to {}'.format(save_name))
+ self.prompt_engine.remove_last_interaction()
def start_multi_turn(self):
"""
Turn on context mode
"""
- self.config['multi_turn'] = 'on'
- self.set_config(self.config)
- print("\n# Multi turn mode is on")
+ self.prompt_engine.config.model_config.multi_turn = "on"
def stop_multi_turn(self):
"""
Turn off context mode
"""
- self.config['multi_turn'] = 'off'
- self.set_config(self.config)
- print("\n# Multi turn mode is off")
+ self.prompt_engine.config.model_config.multi_turn = "off"
def default_context(self):
"""
Go to default context
"""
- self.load_context(self.context_source_filename)
+ shell_context_path = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", f"{self.prompt_engine.config.model_config.shell}-context.yaml"))
+ self.prompt_engine = self.create_prompt_engine_from_yaml(shell_context_path)
+ self.save_prompt_engine(self.prompt_engine, self.default_config_path)
+ print (f'\n# Switched to {self.prompt_engine.config.model_config.shell}-context.yaml')
+
+ def clear_context(self):
+ shell_context_path = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", f"{self.prompt_engine.config.model_config.shell}-context.yaml"))
+ self.prompt_engine = self.create_prompt_engine_from_yaml(shell_context_path)
+ self.save_prompt_engine(self.prompt_engine, self.default_config_path)
+ print (f'\n# Cleared context')
+
+ def save_to(self, filename):
+ if not filename.endswith('.yaml'):
+ filename = filename + '.yaml'
+ filepath = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", filename))
+ with open(filepath, 'w') as f:
+ f.write(self.prompt_engine.save_yaml())
- def load_context(self, filename, initialize=False):
+ def load_context(self, filename):
"""
Loads a context file into current_context
"""
- if not filename.endswith('.txt'):
- filename = filename + '.txt'
+ if not filename.endswith('.yaml'):
+ filename = filename + '.yaml'
filepath = Path(os.path.join(os.path.dirname(__file__), "..", "contexts", filename))
# check if the file exists
if filepath.exists():
- with filepath.open('r') as f:
- lines = f.readlines()
# read in the engine name from openaiapirc
config = configparser.ConfigParser()
config.read(API_KEYS_LOCATION)
- ENGINE = config['openai']['engine'].strip('"').strip("'")
+ engine = config['openai']['engine'].strip('"').strip("'")
+
+ self.prompt_engine = self.create_prompt_engine_from_yaml(filepath)
+ self.prompt_engine.config.model_config.engine = engine ## Needed?
- config = {
- 'engine': ENGINE,
- 'temperature': float(lines[1].split(':')[1].strip()),
- 'max_tokens': int(lines[2].split(':')[1].strip()),
- 'shell': lines[3].split(':')[1].strip(),
- 'multi_turn': lines[4].split(':')[1].strip(),
- 'token_count': int(lines[5].split(':')[1].strip())
- }
+ self.save_prompt_engine(self.prompt_engine, self.default_config_path)
+ return True
- # use new config if old config doesn't exist
- if initialize == False or self.has_config() == False:
- self.set_config(config)
- else:
- self.config = self.read_config()
+ else:
+ return False
- lines = lines[6:]
+
- # write to the current prompt file if we are in multi-turn mode
- if initialize == False or self.config['multi_turn'] == "off":
- with open(self.file_path, 'w') as f:
- f.writelines(lines)
-
- if initialize == False:
- print('\n# Context loaded from {}'.format(filename))
- else:
- print("\n# File not found")
- return False
\ No newline at end of file
+ def save_prompt_engine(self, obj, file_path = os.path.join(os.path.dirname(__file__), "..", 'current_context.pickle')):
+ try:
+ with open(file_path, 'wb') as f:
+ pickle.dump(obj, f)
+ except Exception as e:
+ raise Exception("Error saving prompt engine: {}".format(e))
+
+ def load_prompt_engine(self, file_path = os.path.join(os.path.dirname(__file__), "..", 'current_context.pickle')):
+ try:
+ with open(file_path, 'rb') as f:
+ prompt_engine = pickle.load(f)
+ return prompt_engine
+ return None
+ except Exception as e:
+ print("Error loading prompt engine: {}".format(e))
+ return None