Skip to content

Commit 2d00413

Browse files
authored
llmModule 25.06.1: add options that are needed for integration into DataTools (#7)

* add options that are needed for integration into DataTools
* update news.md
* add tests
* fix message
* rename file
* fix output
* update messages
* align argument names
* fix test
* add shinyTryCatch
* update ReadMe
1 parent 7914756 commit 2d00413

27 files changed

+414
-161
lines changed

DESCRIPTION

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
Package: llmModule
22
Type: Package
33
Title: R Interface for Large Language Model APIs
4-
Version: 25.06.0
4+
Version: 25.06.1
55
Authors@R: c(
66
person("Ricardo", "Fernandes", email = "ldv1452@gmail.com", role = c("aut", "cre")),
77
person("Antonia", "Runge", email = "antonia.runge@inwt-statistics.de", role = c("aut"))
@@ -15,11 +15,13 @@ LazyData: true
1515
Depends: R (>= 3.5.0)
1616
Imports:
1717
data.table,
18+
futile.logger,
1819
httr2,
1920
ollamar,
2021
shiny,
2122
shinyAce,
22-
shinyjs
23+
shinyjs,
24+
shinyTools
2325
Suggests:
2426
lintr (>= 1.0.2),
2527
testthat (>= 2.0.0),

NAMESPACE

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@ S3method(send_prompt,RemoteLlmApi)
1414
S3method(update,OllamaModelManager)
1515
export(as_table)
1616
export(get_llm_models)
17+
export(has_internet)
1718
export(llm_generate_prompt_server)
1819
export(llm_generate_prompt_ui)
1920
export(new_LlmPromptConfig)
@@ -39,5 +40,7 @@ importFrom(ollamar,list_models)
3940
importFrom(ollamar,pull)
4041
importFrom(ollamar,test_connection)
4142
importFrom(shinyAce,aceEditor)
43+
importFrom(shinyAce,updateAceEditor)
44+
importFrom(shinyTools,shinyTryCatch)
4245
importFrom(shinyjs,disable)
4346
importFrom(shinyjs,enable)

NEWS.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,9 @@
1+
# llmModule 25.06.1
2+
3+
## New features
4+
5+
* add options to enable integration of `llm_generate_prompt` shiny module into the import module of the `DataTools` package (#3)
6+
17
# llmModule 25.06.0
28

39
## New features

R/00-LlmApi-helpers.R

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,11 @@
1+
filter_model_list <- function(models, exclude_pattern) {
2+
if (!missing(exclude_pattern) && length(exclude_pattern) > 0 && exclude_pattern != "") {
3+
models <- models[!grepl(exclude_pattern, models)]
4+
}
5+
6+
return(models)
7+
}
8+
19
categorize_model <- function(id) {
210
if (grepl("^gpt-[0-9.]+", id)) {
311
match <- regmatches(id, regexpr("^gpt-[0-9.]+", id))

R/00-Namespace.R

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,8 @@
33
#' @importFrom data.table data.table rbindlist
44
#' @importFrom httr2 req_body_json req_headers req_perform req_timeout request resp_body_json
55
#' @importFrom ollamar list_models pull test_connection
6-
#' @importFrom shinyAce aceEditor
6+
#' @importFrom shinyAce aceEditor updateAceEditor
77
#' @importFrom shinyjs disable enable
8+
#' @importFrom shinyTools shinyTryCatch
89
#'
910
NULL

R/00-OllamaModelManager-helpers.R

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,6 @@ is_model_available <- function(manager, model_name) {
7676
# @param manager An OllamaModelManager object
7777
# @param model_name Character string of the model name
7878
# @return An OllamaModel object
79-
# @export
8079
pull_model_if_needed <- function(manager, model_name) {
8180

8281
available <- is_model_available(manager, model_name)

R/00-logging.R

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
logging <- function(msg, ...) {
2+
futile.logger::flog.info(msg, ...)
3+
}
4+
5+
logDebug <- function(msg, ...) {
6+
futile.logger::flog.debug(msg, ...)
7+
}
8+
9+
logWarn <- function(msg, ...) {
10+
futile.logger::flog.warn(msg, ...)
11+
}

R/01-LlmPromptConfig-class.R

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,8 @@
44
#' making requests to Large Language Models (LLMs) such as OpenAI's GPT models and DeepSeek models.
55
#'
66
#' @param prompt_content character string containing the primary instruction or query for the model. This serves as the main input to the LLM.
7-
#' @param model Character string specifying the model to use (e.g., `'gpt-4-turbo'` for OpenAI or `'deepseek-chat'` for DeepSeek). To retrieve a list of valid models for each LLM, use the \code{get_llm_models()} function.
7+
#' @param model Character string specifying the model to use (e.g., `'gpt-4.1'` for OpenAI or `'deepseek-chat'` for DeepSeek).
8+
#' To retrieve a list of valid models for each LLM, use the \code{get_llm_models()} method.
89
#'
910
#' See the following documentation for valid models:
1011
#' - \href{https://platform.openai.com/docs/models}{OpenAI model list}
@@ -30,10 +31,10 @@
3031
#' models <- get_llm_models(api)
3132
#' }
3233
#'
33-
#' # Create a parameter object for OpenAI GPT-4 Turbo
34+
#' # Create a parameter object for OpenAI GPT-4.1
3435
#' params <- new_LlmPromptConfig(
3536
#' prompt_content = 'Explain entropy in simple terms.',
36-
#' model = 'gpt-4-turbo',
37+
#' model = 'gpt-4.1',
3738
#' temperature = 0.7,
3839
#' max_tokens = 150
3940
#' )

R/01-LlmResponse-class.R

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -52,10 +52,8 @@ new_LlmResponse <- function(api, prompt_config) {
5252
error = function(e) e
5353
)
5454

55-
if (inherits(content, "error")) {
56-
response <- list()
57-
attr(response, "error") <- content$message
58-
return(response)
55+
if (!is.null(attr(content, "error"))) {
56+
return(content)
5957
}
6058

6159
response <- structure(

R/01-LocalLlmApi-class.R

Lines changed: 22 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,13 +3,16 @@
33
#' @param manager An OllamaModelManager object
44
#' @param new_model Character, model name input from user (can be partial) of the model to pull
55
#' @param base_url Local Ollama base URL
6+
#' @param exclude_pattern Character, a regex pattern to exclude certain models from the list of
7+
#' available models, e.g. "babbage|curie|dall-e|davinci|text-embedding|tts|whisper"
68
#'
79
#' @return An object of class LocalLlmApi, or a list with an "error" attribute if construction fails.
810
#' @export
911
new_LocalLlmApi <- function(
1012
manager,
1113
new_model = "",
12-
base_url = Sys.getenv("OLLAMA_BASE_URL", unset = "http://localhost:11434")
14+
base_url = Sys.getenv("OLLAMA_BASE_URL", unset = "http://localhost:11434"),
15+
exclude_pattern = ""
1316
) {
1417
if (!is_ollama_running(url = base_url)) {
1518
api <- list()
@@ -45,7 +48,8 @@ new_LocalLlmApi <- function(
4548
list(
4649
url = base_url,
4750
provider = "Ollama",
48-
manager = manager
51+
manager = manager,
52+
exclude_pattern = exclude_pattern
4953
),
5054
class = c("LocalLlmApi", "LlmApi")
5155
)
@@ -84,10 +88,17 @@ print.LocalLlmApi <- function(x, ...) {
8488
#' @export
8589
get_llm_models.LocalLlmApi <- function(x, ...) {
8690
local_models <- x$manager$local_models
91+
exclude_pattern <- x$exclude_pattern
8792

88-
# Extract categories
89-
categories <- vapply(local_models, function(x) categorize_model(x), character(1))
93+
# Extract models
9094
models <- vapply(local_models, function(x) x, character(1))
95+
96+
# Filter models
97+
models <- models |> filter_model_list(exclude_pattern = exclude_pattern)
98+
99+
# Extract categories
100+
categories <- vapply(models, function(x) categorize_model(x), character(1))
101+
91102
models_list <- extract_named_model_list(models, categories)
92103

93104
return(models_list)
@@ -127,11 +138,14 @@ send_prompt.LocalLlmApi <- function(api, prompt_config) {
127138
body$num_predict <- prompt_config$max_tokens
128139
}
129140

130-
req <- httr2::request(paste0(api$url, "/api/generate")) |>
131-
httr2::req_body_json(body) |>
132-
httr2::req_perform()
141+
resp <- request(paste0(api$url, "/api/generate")) |>
142+
req_body_json(body) |>
143+
try_send_request()
133144

134-
resp <- httr2::resp_body_json(req)
145+
# return early if there was an error
146+
if (!is.null(attr(resp, "error"))) {
147+
return(resp)
148+
}
135149

136150
result <- list(
137151
choices = list(

0 commit comments

Comments (0)