LLMR offers a unified interface for Large Language Models in R. It supports multiple providers, robust retries, structured output, and embeddings.
# One-time installation of the released package from CRAN.
install.packages("LLMR") # CRAN
# Development:
# remotes::install_github("asanaei/LLMR")
# Attach LLMR for the rest of this walkthrough.
library(LLMR)
# Base model configuration reused by every example below.
cfg <- llm_config(
provider = "openai",
model = "gpt-4o-mini",
temperature = 0.2, # low temperature -> more deterministic replies
max_tokens = 256 # cap on generated tokens per response
)
Store keys in environment variables such as OPENAI_API_KEY, ANTHROPIC_API_KEY, GEMINI_API_KEY.
# One-shot chat request: roles are given as names of a character vector.
msgs <- c(
  system = "You are a branding expert.",
  user = "Six-word catch-phrase for eco-friendly balloons."
)
r <- call_llm(config = cfg, messages = msgs)

# Inspect the response object.
print(r)         # text + status line
as.character(r)  # just the text
finish_reason(r) # provider-reported stop reason
tokens(r)        # token usage accounting
is_truncated(r)  # whether the reply stopped early
# JSON Schema for the structured reply: an object with a string `label`
# and a numeric `score`; both required, no extra keys allowed.
# Element order is kept as-is since it is serialized into the request.
schema <- list(
type = "object",
properties = list(
label = list(type = "string"),
score = list(type = "number")
),
required = list("label","score"),
additionalProperties = FALSE
)
# Derive a schema-constrained config from the base one.
cfg_s <- enable_structured_output(cfg, schema = schema)

# Ask for a JSON-only reply and parse it back into an R list.
structured_msgs <- c(
  system = "Reply JSON only.",
  user = "Label and score for 'MNIST'."
)
resp <- call_llm(cfg_s, structured_msgs)
parsed <- llm_parse_structured(resp)
str(parsed)
Or use higher-level helpers:
# Vectorized helper: one structured call per element of `x`; the prompt
# template interpolates each element via `{x}`.
words <- c("excellent","awful","fine")
out <- llm_fn_structured(
x = words,
prompt = "Classify '{x}' and output {label, score in [0,1]} as JSON.",
.config = cfg,
.schema = schema,
.fields = c("label","score")
)
out
# Named input texts to embed.
sentences <- c(
  one = "Quiet rivers mirror bright skies.",
  two = "Thunder shakes the mountain path."
)

# Embedding-mode configuration (Voyage AI provider).
emb_cfg <- llm_config(
  provider = "voyage",
  model = "voyage-large-2",
  embedding = TRUE
)

# Request embeddings, then convert the raw response into a numeric form.
raw_emb <- call_llm(emb_cfg, sentences)
emb <- parse_embeddings(raw_emb)
dim(emb) # inspect the dimensions of the result
Batch embeddings:
# Embed many texts, grouping up to `batch_size` per API request.
emb <- get_batched_embeddings(
texts = sentences,
embed_config = emb_cfg,
batch_size = 8
)
# Stateful multi-turn conversation; order of send() calls matters.
# NOTE(review): presumably each send() extends the shared history —
# confirm against the chat_session() documentation.
chat <- chat_session(cfg, system = "You teach statistics tersely.")
chat$send("Explain p-values in 12 words.")
chat$send("Now give a three-word analogy.")
print(chat)
# Run a factorial grid of (config x prompt) experiments in parallel,
# then release the worker pool.
setup_llm_parallel(workers = 4)
experiments <- build_factorial_experiments(
  configs = list(cfg),
  user_prompts = c("Summarize in one sentence: The Apollo program."),
  system_prompts = "Be concise." # fixed: space around `=` per style guide
)
res <- call_llm_par(experiments, progress = TRUE)
reset_llm_parallel() # always restore the sequential plan when done
Issues and pull requests are welcome. Include a minimal reproducible example.