Get rid of really_run
nicovank committed Feb 20, 2024
1 parent 50617ca commit d2b56f0
Showing 3 changed files with 22 additions and 28 deletions.
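In short, this commit replaces the really_run=True keyword that was threaded through why and explain with an argparse.Namespace produced by parse_known_args: "show the prompt instead of running" becomes an ordinary parsed flag rather than a special-cased boolean. Schematically (a sketch with hypothetical _before/_after names; the signatures mirror the diff below):

import argparse

# Before: a boolean escape hatch threaded through the call chain.
def explain_before(source_code: str, traceback: str, exception: str,
                   really_run=True) -> None: ...

# After: callers parse options once and pass the namespace along.
def explain_after(source_code: str, traceback: str, exception: str,
                  args: argparse.Namespace) -> None: ...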
src/chatdbg/chatdbg_gdb.py — 7 changes: 3 additions & 4 deletions

@@ -41,7 +41,7 @@ class Why(gdb.Command):
     def __init__(self):
         gdb.Command.__init__(self, "why", gdb.COMMAND_USER)
 
-    def invoke(self, arg, from_tty, really_run=True):
+    def invoke(self, arg, from_tty):
         try:
             frame = gdb.selected_frame()
         except:
@@ -53,11 +53,10 @@ def invoke(self, arg, from_tty, really_run=True):
             # which _probably_ means a SEGV.
             last_error_type = "SIGSEGV"
         the_prompt = buildPrompt()
+        args, _ = chatdbg_utils.parse_known_args(arg.split())
         if the_prompt:
             # Call `explain` function with pieces of the_prompt as arguments.
-            chatdbg_utils.explain(
-                the_prompt[0], the_prompt[1], the_prompt[2], really_run
-            )
+            chatdbg_utils.explain(the_prompt[0], the_prompt[1], the_prompt[2], args)
 
 
 Why()
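The net effect on the gdb side: invoke no longer threads a boolean through the call chain; it tokenizes its raw argument string, parses options out of it, and hands the resulting namespace to explain. Below is a minimal, self-contained sketch of that flow; parse_known_args and explain are stubs standing in for the chatdbg_utils versions, and the --show-prompt spelling is an inference from the args.show_prompt check in chatdbg_utils.py further down.

import argparse
from typing import List, Tuple


def parse_known_args(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]:
    parser = argparse.ArgumentParser(prog="why")
    # Assumed flag registration; the real parser defines more options.
    parser.add_argument("--show-prompt", action="store_true")
    return parser.parse_known_args(argv)


def explain(source_code, traceback, exception, args):
    # The parsed namespace replaces the old `really_run` boolean.
    if args.show_prompt:  # formerly `if not really_run:`
        print("<would print the prompt and return>")
        return
    print("<would query the model>")


# gdb passes the command arguments as one raw string, hence the split().
arg = "--show-prompt"
args, _ = parse_known_args(arg.split())
explain("<source>", "<traceback>", "<exception>", args)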
src/chatdbg/chatdbg_lldb.py — 33 changes: 13 additions & 20 deletions

@@ -207,41 +207,34 @@ def why(
     command: str,
     result: str,
     internal_dict: dict,
-    really_run=True,
 ) -> None:
     """
     Root cause analysis for an error.
     The why command is where we use the refined stack trace system.
     We send information once to GPT, and receive an explanation.
     There is a bit of work to determine what context we end up sending to GPT.
     Notably, we send a summary of all stack frames, including locals.
     """
     # Check if there is debug info.
     if not is_debug_build(debugger):
         print(
             "Your program must be compiled with debug information (`-g`) to use `why`."
         )
-        return
-    # Check if program is attached to a debugger.
+        sys.exit(1)
+    # Check if debugger is attached to a program.
     if not get_target():
         print("Must be attached to a program to ask `why`.")
-        return
-    # Check if code has been run before executing the `why` command.
+        sys.exit(1)
+    # Check if the program has been run prior to executing the `why` command.
     thread = get_thread()
     if not thread:
         print("Must run the code first to ask `why`.")
-        return
-    # Check why code stopped running.
+        sys.exit(1)
     if thread.GetStopReason() == lldb.eStopReasonBreakpoint:
+        # Check if execution stopped at a breakpoint or an error.
         print("Execution stopped at a breakpoint, not an error.")
-        return
-    the_prompt = buildPrompt(debugger)
-    chatdbg_utils.explain(the_prompt[0], the_prompt[1], the_prompt[2], really_run)
-
+        sys.exit(1)
 
-@lldb.command("why_prompt")
-def why_prompt(
-    debugger: lldb.SBDebugger, command: str, result: str, internal_dict: dict
-) -> None:
-    """Output the prompt that `why` would generate (for debugging purposes only)."""
-    why(debugger, command, result, internal_dict, really_run=False)
+    the_prompt = buildPrompt(debugger)
+    args, _ = chatdbg_utils.parse_known_args(command)
+    chatdbg_utils.explain(the_prompt[0], the_prompt[1], the_prompt[2], args)
 
 
 @lldb.command("print-test")
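Two things are worth noting here. First, the separate why_prompt command is deleted outright rather than rewritten: with the boolean gone, its job (showing the prompt without an API call) is presumably covered by the parsed options, i.e. why --show-prompt. Second, the gdb path above calls parse_known_args(arg.split()) while the lldb path, as rendered here, passes the raw command string; argparse iterates whatever sequence it receives, so an unsplit string is consumed character by character and the flag is never matched. A quick check of that behavior (flag registration assumed as before; the raw-string call may simply be an extraction artifact of this page):

import argparse

parser = argparse.ArgumentParser(prog="why")
parser.add_argument("--show-prompt", action="store_true")  # assumed flag

# Token list, as produced by `arg.split()` on the gdb side:
args, leftovers = parser.parse_known_args("--show-prompt".split())
print(args.show_prompt)  # True

# A raw string is iterated per character, so the flag goes unrecognized:
args, leftovers = parser.parse_known_args("--show-prompt")
print(args.show_prompt)  # False; the characters land in `leftovers`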
src/chatdbg/chatdbg_utils.py — 10 changes: 6 additions & 4 deletions

@@ -39,7 +39,7 @@ def _get_help_string(self, action):
         return help
 
 
-def parse_known_args(argument_string: str) -> Tuple[argparse.Namespace, List[str]]:
+def parse_known_args(argv: List[str]) -> Tuple[argparse.Namespace, List[str]]:
     description = textwrap.dedent(
         rf"""
         [b]ChatDBG[/b]: A Python debugger that uses AI to tell you `why`.
@@ -71,7 +71,7 @@ def parse_known_args(argument_string: str) -> Tuple[argparse.Namespace, List[str
         help="the timeout for API calls in seconds",
     )
 
-    return parser.parse_known_args(argument_string)
+    return parser.parse_known_args(argv)
 
 
 def get_model() -> str:
@@ -102,7 +102,9 @@ def get_model() -> str:
     return model
 
 
-def explain(source_code: str, traceback: str, exception: str, really_run=True) -> None:
+def explain(
+    source_code: str, traceback: str, exception: str, args: argparse.Namespace
+) -> None:
     user_prompt = f"""
     Explain what the root cause of this error is, given the following source code
     context for each stack frame and a traceback, and propose a fix. In your
@@ -126,7 +128,7 @@ def explain(source_code: str, traceback: str, exception: str, really_run=True) -
 
     input_tokens = llm_utils.count_tokens(model, user_prompt)
 
-    if not really_run:
+    if args.show_prompt:
         print(user_prompt)
         print(f"Total input tokens: {input_tokens}")
         return
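A closing note on the choice of parse_known_args over parse_args: it returns the tokens it does not recognize instead of erroring on them, which lets a why invocation mix recognized flags with free-form text. A small demonstration of that contract, reusing the assumed --show-prompt registration and the --timeout option visible above:

import argparse

parser = argparse.ArgumentParser(prog="why")
parser.add_argument("--show-prompt", action="store_true")  # assumed spelling
parser.add_argument("--timeout", type=int,
                    help="the timeout for API calls in seconds")

# Recognized options are consumed; everything else is handed back untouched.
args, leftovers = parser.parse_known_args(
    ["--timeout", "30", "what", "caused", "this?"]
)
print(args.timeout)      # 30
print(args.show_prompt)  # False
print(leftovers)         # ['what', 'caused', 'this?']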
