Commit 23d53c1
Fixes the parsed warning and error message from OpenAI, updates the test snapshot
edgararuiz committed May 1, 2024
1 parent e9271b1 commit 23d53c1
Showing 2 changed files with 12 additions and 7 deletions.
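For context, here is a standalone sketch (not the package code; the function name and arguments are illustrative) of the failure flow the first file moves to: surface the parsed OpenAI message as a warning, then abort with the HTTP status while suppressing the call context.

library(cli)
library(rlang)

# Hypothetical condensation of the new non-200 path in ch_openai_complete():
# warn with the parsed API message first, then abort without naming a caller.
fail_like_commit <- function(status, parsed_msg) {
  if (status != 200) {
    cli_alert_warning(parsed_msg)                    # warning, not yet an error
    cli_abort("HTTP status: {status}", call = NULL)  # abort with no call shown
  }
  invisible(parsed_msg)
}
# try(fail_like_commit(400, "Error from OpenAI ..."))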
17 changes: 11 additions & 6 deletions R/backend-openai.R
@@ -81,7 +81,7 @@ ch_openai_complete <- function(prompt, defaults, stream = TRUE) {
   )
   ret <- ch_openai_parse(ret, defaults)
   if (req_result$status_code != 200) {
-    cli_alert_warning(ret)
+    ch_openai_error(ret, use_abort = FALSE)
     if (inherits(req_result, "httr2_response")) {
       req_result <- paste0(
         resp_status(req_result),
@@ -92,7 +92,7 @@
     if (!inherits(req_result, "character")) {
       req_result <- "Undefined error"
     }
-    abort(req_result)
+    cli_abort(req_result, call = NULL)
   }
   ch_openai_error(ret)
   ret
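The switch from rlang's abort() to cli_abort() with call = NULL keeps the message but drops the internal function call from the error header, so the user is not pointed at chattr internals. A quick standalone comparison (f and g are throwaway names):

library(rlang)
library(cli)

f <- function() abort("Undefined error")                   # header names `f()`
g <- function() cli_abort("Undefined error", call = NULL)  # header shows no call
# try(f())  # Error in `f()`: Undefined error
# try(g())  # Error: Undefined error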
@@ -185,7 +185,7 @@ ch_openai_token <- function(defaults, fail = TRUE) {
   ret
 }
 
-ch_openai_error <- function(x) {
+ch_openai_error <- function(x, use_abort = TRUE) {
   if (is.null(x)) {
     return(invisible())
   }
@@ -197,7 +197,12 @@ ch_openai_error <- function(x) {
       "Error from OpenAI\n",
       substr(x, 10, nchar(x))
     )
-    abort(error_msg)
+    if(use_abort) {
+      abort(error_msg)
+    } else {
+      cli_alert_warning(error_msg)
+    }
+
   }
   invisible()
 }
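A standalone sketch of the reworked helper, under the assumption (implied by substr(x, 10, ...)) that parse errors arrive tagged with the nine-character "{{error}}" prefix; notify_openai_error is an illustrative name, not the package function:

library(cli)
library(rlang)

notify_openai_error <- function(x, use_abort = TRUE) {
  if (is.null(x)) return(invisible())
  if (substr(x, 1, 9) == "{{error}}") {
    error_msg <- paste0("Error from OpenAI\n", substr(x, 10, nchar(x)))
    if (use_abort) abort(error_msg) else cli_alert_warning(error_msg)
  }
  invisible()
}
# notify_openai_error("{{error}}rate limit reached", use_abort = FALSE)  # warns only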
@@ -230,9 +235,9 @@ ch_openai_parse <- function(x, defaults) {
   if ("error" %in% names(json_res)) {
     json_error <- json_res$error
     out <- paste0(
-      "{{error}}Type:",
+      "{{error}}{.emph Type:} ",
       json_error$type,
-      "\nMessage: ",
+      "\n{.emph Message:} ",
       json_error$message
     )
   }
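The added {.emph ...} spans are cli inline markup, so the Type and Message labels render emphasized (italic, on capable terminals) once the string reaches a cli function such as cli_alert_warning(). A minimal check:

library(cli)

cli_text("{.emph Type:} invalid_request_error")
cli_text("{.emph Message:} maximum context length exceeded")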
2 changes: 1 addition & 1 deletion tests/testthat/_snaps/backend-openai.md
@@ -47,7 +47,7 @@
     Code
       parsed
     Output
-    [1] "{{error}}Type:invalid_request_error\nMessage: This model's maximum context length is 4097 tokens. However, your messages resulted in 22261 tokens. Please reduce the length of the messages."
+    [1] "{{error}}{.emph Type:} invalid_request_error\n{.emph Message:} This model's maximum context length is 4097 tokens. However, your messages resulted in 22261 tokens. Please reduce the length of the messages."
 
 # Init messages work
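The snapshot file records expected test output, so changing the parsed string requires regenerating it. A typical workflow, assuming the snapshot comes from a conventionally named test file:

# Re-run the affected tests, then accept the updated snapshot.
testthat::test_file("tests/testthat/test-backend-openai.R")  # assumed file name
testthat::snapshot_accept("backend-openai")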
