feat: Add Exception Handling for LiteLLM #22

Merged: 37 commits from Exception_Handling into main on Nov 12, 2024
Changes from all commits
Commits
d829db9
Merge branch 'GatorEducator:main' into Exception_Handling
hemanialaparthi Sep 16, 2024
ff6f225
feat: add initial api key exception handling functions
dyga01 Sep 18, 2024
8499dad
add(advise.py): functions to handle server URL validation, connection…
hemanialaparthi Sep 18, 2024
705e782
add(advise.py): function to check for internet connection
hemanialaparthi Sep 18, 2024
01a3bdb
feat(advice.py): internet connection execption handling
hemanialaparthi Sep 18, 2024
7eecce8
add(advise.py): code comments
hemanialaparthi Sep 18, 2024
cae208e
feat(exceptions.py): added all the exceptions to the correct file
dyga01 Sep 21, 2024
6041c6c
fix(advise.py): added the correct validation functions and worked to …
dyga01 Sep 21, 2024
5b2663d
add(advise.py): handle connection error
hemanialaparthi Sep 23, 2024
934abcf
feat(advise.py): traceback function to get the function
hemanialaparthi Sep 23, 2024
b546e04
add(advice.py): new implemention to get_traceback
hemanialaparthi Sep 24, 2024
64e8e5e
fix(exceptions.py): work to implement general litellm exception handl…
dyga01 Sep 24, 2024
cc580d8
fix(exceptions.py): fix the raise general purpose exception error
dyga01 Sep 24, 2024
2b0d0e7
add(exceptions.py): console and modify the models
hemanialaparthi Sep 24, 2024
f23cdbe
add(test_exception.py): file added
hemanialaparthi Sep 24, 2024
31fcb0f
add(exceptions.py): detailed exceptions messages for the litellm_exce…
hemanialaparthi Sep 24, 2024
f2a2656
add(test_execptions.py): test for exceptions
hemanialaparthi Sep 25, 2024
f135caa
fix: fix the branch to pass linting checks
dyga01 Sep 25, 2024
42e19be
fix: fix the ruff linting checks in main.py
dyga01 Sep 25, 2024
d48b13f
fix(exceptions.py): symbex issues
hemanialaparthi Sep 25, 2024
f99606b
fix(advise.py): removed the exit statements from the api server and m…
dyga01 Oct 2, 2024
d9213fa
fix(advise.py): removed the sys import statement
dyga01 Oct 2, 2024
ba1dd2b
add(exceptions.py): exceptions for server connectivity issues
hemanialaparthi Oct 3, 2024
bd9fde9
feat(advise.py): sort the imports
hemanialaparthi Oct 24, 2024
233368e
add(advice.py): fix merge conflicts.
hemanialaparthi Oct 24, 2024
a5e7004
Merge branch 'main' into Exception_Handling
hemanialaparthi Oct 24, 2024
c4d315d
feat(advise.py): fix the indentation error.
hemanialaparthi Oct 24, 2024
819bf1e
fix(advise.py): fix indentation & format issues in the advise.py
hemanialaparthi Oct 24, 2024
16c528c
fix(advise.py): add changes to the output of the exception handling f…
hemanialaparthi Oct 29, 2024
9b93fc4
change(test_exceptions.py): test case output since output has been ch…
hemanialaparthi Oct 29, 2024
a95d21b
add: console to print the get_lltellm_traceback()
hemanialaparthi Oct 31, 2024
6c9e795
change test outputs to work with the new format.
hemanialaparthi Oct 31, 2024
ae9e76a
fix(exceptions.py): fix the output messages
hemanialaparthi Oct 31, 2024
2a5b2e9
add(test_exceptions.py): add console to the test cases to ensure the …
hemanialaparthi Oct 31, 2024
483c104
fix(test_exceptions.py): ruff linting issues
hemanialaparthi Oct 31, 2024
ed3c97f
add(advice.py): addition to text that says this error has been output…
hemanialaparthi Nov 4, 2024
1866c79
Merge branch 'GatorEducator:main' into Exception_Handling
hemanialaparthi Nov 12, 2024
178 changes: 74 additions & 104 deletions execexam/advise.py
@@ -2,7 +2,6 @@

import random
import socket
import sys
from typing import List, Optional

import openai
@@ -12,6 +11,7 @@
from rich.panel import Panel

from . import enumerations
from .exceptions import get_litellm_traceback


def load_litellm() -> None:
@@ -81,12 +81,10 @@ def check_advice_model(
)
and advice_model is None
):
return_code = 1
console.print()
console.print(
"[red]The --advice-model option is required when --report includes 'advice' or 'all'"
)
sys.exit(return_code)


def check_advice_server(
@@ -105,12 +103,10 @@
and advice_method == enumerations.AdviceMethod.api_server
and advice_server is None
):
return_code = 1
console.print()
console.print(
"[red]The --advice-server option is required when --advice-method is 'api_server'"
)
sys.exit(return_code)
elif (
report is not None
and (
@@ -120,12 +116,10 @@
and advice_method == enumerations.AdviceMethod.api_server
and not validate_url(advice_server)
):
return_code = 1
console.print()
console.print(
"[red]The --advice-server option did not specify a valid URL"
"[bold red]Before sending to LLM:\nThe --advice-server option did not specify a valid URL"
)
sys.exit(return_code)


def fix_failures( # noqa: PLR0913
@@ -147,108 +141,84 @@
# Call the handle_connection_error function
handle_connection_error(console)
return
with console.status(
"[bold green] Getting Feedback from ExecExam's Coding Mentor"
):
# the test overview is a string that contains both
# the filtered test output and the details about the passing
# and failing assertions in the test cases
test_overview = filtered_test_output + exec_exam_test_assertion_details
# create an LLM debugging request that contains all of the
# information that is needed to provide advice about how
# to fix the bug(s) in the program that are part of an
# executable examination; note that, essentially, an
# examination consists of Python functions that a student
# must complete and then test cases that confirm the correctness
# of the functions that are implemented; note also that
# ExecExam has a Pytest plugin that collects additional details
llm_debugging_request = (
"I am an undergraduate student completing a programming examination."
+ "You may never make suggestions to change the source code of the test cases."
+ "Always make suggestions about how to improve the Python source code of the program under test."
+ "Always give Python code in a Markdown fenced code block with your suggested program."
+ "Always start your response with a friendly greeting and overview of what you will provide."
+ "Always conclude by saying that you are making a helpful suggestion but could be wrong."
+ "Always be helpful, upbeat, friendly, encouraging, and concise when making a response."
+ "Your task is to suggest, in a step-by-step fashion, how to fix the bug(s) in the program?"
+ "What follows is all of the information you need to complete the debugging task."
+ f"Here is the test overview with test output and details about test assertions: {test_overview}"
+ f"Here is a brief overview of the test failure information: {failing_test_details}"
+ f"Here is the source code for the one or more failing test(s): {failing_test_code}"
)
# the API key approach expects that the person running the execexam
        # tool has specified an API key for a supported cloud-based LLM system
if advice_method == enumerations.AdviceMethod.api_key:
# submit the debugging request to the LLM-based mentoring system
response = completion( # type: ignore
model=advice_model,
messages=[{"role": "user", "content": llm_debugging_request}],
try:
with console.status(
"[bold green] Getting Feedback from ExecExam's Coding Mentor"
):
test_overview = (
filtered_test_output + exec_exam_test_assertion_details
)
# display the advice from the LLM-based mentoring system
# in a panel that is created by using the rich library
if fancy:
console.print(
Panel(
Markdown(
str(
response.choices[0].message.content, # type: ignore
llm_debugging_request = (
"I am an undergraduate student completing a programming examination."
+ " You may never make suggestions to change the source code of the test cases."
+ " Always make suggestions about how to improve the Python source code of the program under test."
+ " Always give Python code in a Markdown fenced code block with your suggested program."
+ " Always start your response with a friendly greeting and overview of what you will provide."
+ " Always conclude by saying that you are making a helpful suggestion but could be wrong."
+ " Always be helpful, upbeat, friendly, encouraging, and concise when making a response."
+ " Your task is to suggest, in a step-by-step fashion, how to fix the bug(s) in the program?"
+ f" Here is the test overview with test output and details about test assertions: {test_overview}"
+ f" Here is a brief overview of the test failure information: {failing_test_details}"
+ f" Here is the source code for the one or more failing test(s): {failing_test_code}"
)

if advice_method == enumerations.AdviceMethod.api_key:
# Submit the debugging request to the LLM-based mentoring system
response = completion( # type: ignore
model=advice_model,
messages=[
{"role": "user", "content": llm_debugging_request}
],
)
# Display the advice from the LLM-based mentoring system
if fancy:
console.print(
Panel(
Markdown(
str(response.choices[0].message.content), # type: ignore
),
code_theme=syntax_theme.value,
expand=False,
title="Advice from ExecExam's Coding Mentor (API Key)",
padding=1,
)
)
else:
console.print(
Markdown(
str(response.choices[0].message.content), # type: ignore
),
expand=False,
title="Advice from ExecExam's Coding Mentor (API Key)",
padding=1,
)
console.print()

elif advice_method == enumerations.AdviceMethod.api_server:
# Use the OpenAI approach to submit the debugging request
client = openai.OpenAI(
api_key="anything", base_url=advice_server
)
else:
console.print(
Markdown(
str(
response.choices[0].message.content, # type: ignore
),
code_theme=syntax_theme.value,
),
response = client.chat.completions.create(
model=advice_model,
messages=[
{"role": "user", "content": llm_debugging_request}
],
)
console.print()
# the apiserver approach expects that the person running the execexam
# tool will specify the URL of a remote LLM-based mentoring system
# that is configured to provide access to an LLM system for advice
elif advice_method == enumerations.AdviceMethod.api_server:
# use the OpenAI approach to submitting the
# debugging request to the LLM-based mentoring system
# that is currently running on a remote LiteLLM system;
# note that this does not seem to work correctly if
# you use the standard LiteLLM approach as done with
# the standard API key approach elsewhere in this file
client = openai.OpenAI(
api_key="anything",
base_url=advice_server,
)
# submit the debugging request to the LLM-based mentoring system
# using the specified model and the debugging prompt
response = client.chat.completions.create(
model=advice_model,
messages=[{"role": "user", "content": llm_debugging_request}],
)
if fancy:
console.print(
Panel(
if fancy:
console.print(
Panel(
Markdown(
str(response.choices[0].message.content),
code_theme=syntax_theme.value,
),
expand=False,
title="Advice from ExecExam's Coding Mentor (API Server)",
padding=1,
)
)
else:
console.print(
Markdown(
str(response.choices[0].message.content),
code_theme=syntax_theme.value,
str(response.choices[0].message.content), # type: ignore
),
expand=False,
title="Advice from ExecExam's Coding Mentor (API Server)",
padding=1,
)
)
else:
console.print(
Markdown(
str(
response.choices[0].message.content, # type: ignore
),
code_theme=syntax_theme.value,
),
)
console.print()
console.print()
except Exception:
get_litellm_traceback(console)
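
Note: handle_connection_error, the connectivity check behind it, and validate_url are defined elsewhere in advise.py and are not expanded in this diff. A minimal sketch of what such helpers can look like follows; the function bodies and the check_internet_connection name are assumptions for illustration, not code from this PR.

import socket
from urllib.parse import urlparse

from rich.console import Console


def validate_url(url) -> bool:
    """Return True when the URL has a usable scheme and network location (sketch)."""
    if not url:
        return False
    parsed = urlparse(url)
    return parsed.scheme in ("http", "https") and bool(parsed.netloc)


def check_internet_connection(timeout: float = 3.0) -> bool:
    """Return True if a well-known public DNS server is reachable (sketch)."""
    try:
        # a TCP connection to 8.8.8.8:53 succeeds only with a working network path
        with socket.create_connection(("8.8.8.8", 53), timeout=timeout):
            return True
    except OSError:
        return False


def handle_connection_error(console: Console) -> None:
    """Report a missing internet connection before any LLM request is sent (sketch)."""
    console.print(
        "[bold red]No internet connection detected. Please check your network and try again.[/bold red]"
    )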
54 changes: 54 additions & 0 deletions execexam/exceptions.py
@@ -0,0 +1,54 @@
"""Define exceptions for the input errors in the command line."""

import sys

# Note: Console does not need to be imported here; the caller passes in a Console instance as an argument.


def get_litellm_traceback(console) -> None:
"""Print the traceback of the last exception."""
exc_type, exc_obj, _ = sys.exc_info()

if exc_type is None:
return
    # Mapping of litellm exception type names to their explanations
litellm_exceptions = {
"NotFoundError": "LLM resource not found. Please check your model and/or endpoint.",
"AuthenticationError": "API authentication failed. Please verify your API key.",
"RateLimitError": "Rate limit exceeded. Wait and retry or check API key.\nNOTE: This error can sometimes be caused by an invalid API key.",
"InvalidRequestError": "Malformed API request. Please review parameters.",
"APIError": "Internal LLM API error. Retry later.",
"APIConnectionError": "Connection failed. \nNOTE: This error can sometimes be caused by an invalid server URL. Verify your server URL.",
}

# if statements to display exceptions
if exc_type.__name__ in litellm_exceptions:
console.print(
f"[bold red]Exception Type: {exc_type.__name__}[/bold red]"
)
console.print(f"Explanation: {litellm_exceptions[exc_type.__name__]}")
else:
# default behavior for non-litellm exceptions
console.print(
f"[bold red]Exception Type: {exc_type.__name__}[/bold red]"
)
console.print(f"Error Message: {exc_obj!s}")

    # general purpose output as a backup
console.print(
"\n[bold red]If your issue persists, ensure the model you entered is correct, such as:[/bold red]"
)
console.print("[bold blue]- anthropic/claude-3-haiku-20240307[/bold blue]")
console.print("[bold blue]- anthropic/claude-3-opus-20240229[/bold blue]")
console.print("[bold blue]- groq/llama3-8b-8192[/bold blue]")
console.print(
"[bold blue]- openrouter/meta-llama/llama-3.1-8b-instruct:free[/bold blue]"
)

console.print(
"\n[bold red]Please visit [bold blue]https://docs.litellm.ai/docs/providers [/bold blue]for more valid LiteLLM models[bold red]"
)

console.print(
"\n[bold red]For server connectivity issues, please visit [bold blue]https://docs.litellm.ai/docs/simple_proxy [/bold blue]for a valid LiteLLM proxy.[/bold red]"
)
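
Note: get_litellm_traceback reads sys.exc_info(), so it only reports usefully when called from inside an except block, as advise.py does above. Because it dispatches on the exception class name rather than on the class itself, no litellm import is required; the stand-in class in this minimal sketch (an illustration, not part of the PR) exercises the same branch as litellm's real AuthenticationError.

from rich.console import Console

from execexam.exceptions import get_litellm_traceback

console = Console()


class AuthenticationError(Exception):
    """Stand-in that shares its name with litellm's AuthenticationError."""


try:
    raise AuthenticationError("invalid API key")
except Exception:
    # must run inside the handler so that sys.exc_info() sees the exception
    get_litellm_traceback(console)
# Prints the exception type, the mapped explanation ("API authentication
# failed. Please verify your API key."), and the general model/proxy guidance.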
79 changes: 79 additions & 0 deletions tests/test_exceptions.py
@@ -0,0 +1,79 @@
"""Test Suite for Exceptions Module."""

from unittest.mock import patch

from rich.console import Console

from execexam.exceptions import get_litellm_traceback

# Create a console object for testing
console = Console()


def test_not_found_error():
"""Test case for NotFoundError."""
# Mocking sys.exc_info to simulate a NotFoundError exception
with patch(
"sys.exc_info",
return_value=(
type("NotFoundError", (Exception,), {}),
Exception("Resource not found"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for NotFoundError
mock_print.assert_any_call(
"[bold red]Exception Type: NotFoundError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: LLM resource not found. Please check your model and/or endpoint."
)


def test_authentication_error():
"""Test case for AuthenticationError."""
# Mocking sys.exc_info to simulate an AuthenticationError exception
with patch(
"sys.exc_info",
return_value=(
type("AuthenticationError", (Exception,), {}),
Exception("Authentication failed"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for AuthenticationError
mock_print.assert_any_call(
"[bold red]Exception Type: AuthenticationError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: API authentication failed. Please verify your API key."
)


def test_rate_limit_error():
"""Test case for RateLimitError."""
# Mocking sys.exc_info to simulate a RateLimitError exception
with patch(
"sys.exc_info",
return_value=(
type("RateLimitError", (Exception,), {}),
Exception("Rate limit exceeded"),
None,
),
):
with patch("rich.console.Console.print") as mock_print:
# Call the function to get the traceback
get_litellm_traceback(console)
# Assert that the correct messages are printed for RateLimitError
mock_print.assert_any_call(
"[bold red]Exception Type: RateLimitError[/bold red]"
)
mock_print.assert_any_call(
"Explanation: Rate limit exceeded. Wait and retry or check API key.\nNOTE: This error can sometimes be caused by an invalid API key."
)
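

Note: the suite covers three of the six mapped exception types. Below is a sketch of one additional test for the fallback branch that handles non-litellm exceptions, following the same mocking pattern as above; this test is not part of the PR. Like the others, it runs under pytest, e.g. pytest tests/test_exceptions.py.

def test_generic_exception_fallback():
    """Hypothetical test for the non-litellm fallback branch (sketch)."""
    # Mocking sys.exc_info to simulate an exception outside the litellm map
    with patch(
        "sys.exc_info",
        return_value=(
            type("ValueError", (Exception,), {}),
            Exception("unexpected failure"),
            None,
        ),
    ):
        with patch("rich.console.Console.print") as mock_print:
            get_litellm_traceback(console)
            # The fallback branch prints the raw error message instead of
            # a litellm-specific explanation
            mock_print.assert_any_call(
                "[bold red]Exception Type: ValueError[/bold red]"
            )
            mock_print.assert_any_call("Error Message: unexpected failure")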