Title: Process text with Ollama, retrieve structured results, cache them locally in DuckDB
Description: Process text with Ollama, store results in DuckDB.
Authors: Giorgio Comai [aut, cre, cph]
Maintainer: Giorgio Comai <[email protected]>
License: MIT + file LICENSE
Version: 0.0.0.9013
Built: 2025-03-28 18:22:37 UTC
Source: https://github.com/giocomai/quackingllama
Disable caching for the current session
ql_disable_db()
Nothing, used for its side effects.
Other database: ql_enable_db(), ql_set_db_options()
ql_disable_db()
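To illustrate the intended workflow, a minimal sketch (assuming a local Ollama instance and that caching was previously enabled with ql_enable_db()):

## Not run:
ql_enable_db() # caching on: responses are stored locally
ql_prompt("a haiku") |> ql_generate() # response is cached
ql_disable_db() # caching off for the rest of the session
ql_prompt("a haiku") |> ql_generate() # queries the LLM again; nothing is stored
## End(Not run)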
Enable storing data in a database for the current session
ql_enable_db(db_type = "DuckDB")
db_type: Defaults to "DuckDB".
Nothing, used for its side effects.
Other database: ql_disable_db(), ql_set_db_options()
ql_enable_db()
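A short sketch of enabling caching and then inspecting the resulting database settings (the "db" option is assumed here to hold the caching flag; it is listed among the retrievable options of ql_get_db_options()):

ql_enable_db() # db_type defaults to "DuckDB"
ql_get_db_options(c("db", "db_type")) # inspect the resulting settings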
Generate a response and return the result in a data frame
ql_generate(
  prompt_df,
  only_cached = FALSE,
  host = NULL,
  message = NULL,
  timeout = NULL,
  error = c("fail", "warn")
)
prompt_df: A data frame with all inputs passed to the LLM, typically created with ql_prompt().
only_cached: Defaults to FALSE. If TRUE, only cached responses are returned.
host: The address where the Ollama API can be reached, e.g. "http://localhost:11434".
timeout: Time in seconds before the request times out. If not set here or with ql_set_options(), defaults to 300 (5 minutes).
error: Defines how errors should be handled; defaults to "fail", i.e. if an error emerges while querying the LLM, the function stops. If set to "warn", a warning is emitted instead and processing continues.
A data frame, including a response column, as well as other information returned by the model.
## Not run:
ql_prompt("a haiku") |> ql_generate()
## End(Not run)
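Beyond the basic call above, a sketch of the other documented arguments: only_cached retrieves results without contacting Ollama, and error = "warn" keeps a batch running when single queries fail.

## Not run:
## return only responses already stored in the local cache,
## without querying the LLM
ql_prompt("a haiku") |> ql_generate(only_cached = TRUE)

## warn on failed queries instead of stopping
ql_prompt("a haiku") |> ql_generate(error = "warn")
## End(Not run)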
Retrieve database options
ql_get_db_options(options = c("db", "db_type", "db_folder", "db_filename"))
options: Available options that can be retrieved. Defaults to all of them: "db", "db_type", "db_folder", "db_filename".
A list with the selected options.
ql_get_db_options()
## Retrieve only selected option
ql_get_db_options("db_type")
Get available models
ql_get_models(host = "http://localhost:11434")
host: Defaults to "http://localhost:11434", where a locally deployed Ollama instance usually responds.
A data frame (a tibble) with details on all locally available models.
## Not run:
ql_get_models()
## End(Not run)
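Since the result is a tibble, it can be filtered with standard tools. The sketch below assumes the returned tibble includes a name column, as in Ollama's own model listing; the exact columns depend on what the API returns.

## Not run:
models_df <- ql_get_models()
## assumption: the tibble has a `name` column, as in Ollama's /api/tags output
models_df[grepl("llama", models_df$name), ]
## End(Not run)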
Get options
ql_get_options(
  options = c("system", "model", "host", "temperature", "seed", "keep_alive", "timeout"),
  system = NULL,
  model = NULL,
  host = NULL,
  temperature = NULL,
  seed = NULL,
  keep_alive = NULL,
  timeout = NULL
)
options: A character vector used to filter which options should be returned. Defaults to all available.
A list with all available options (or those selected with options).
ql_set_options(
  model = "llama3.2",
  host = "http://localhost:11434",
  system = "You are a helpful assistant.",
  temperature = 0,
  seed = 42,
  keep_alive = "5m"
)
ql_get_options()
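Selected options can also be retrieved individually by passing their names, e.g.:

ql_get_options(c("model", "temperature"))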
Add a hash column to a prompt data frame. Mostly used internally.
ql_hash(prompt_df)
prompt_df: A data frame with all inputs passed to the LLM, typically created with ql_prompt().
A tibble, such as those returned by ql_prompt(), but always including a hash column.
ql_prompt("a haiku", hash = FALSE) |> ql_hash()
ql_prompt("a haiku", hash = FALSE) |> ql_hash()
Create a prompt data frame, typically passed to ql_generate().
ql_prompt(
  prompt,
  system = NULL,
  format = NULL,
  model = NULL,
  images = NULL,
  temperature = NULL,
  seed = NULL,
  host = NULL,
  hash = TRUE
)
prompt: A prompt for the LLM.
system: System message to pass to the model. See the official documentation for details. For example: "You are a helpful assistant."
model: The name of the model, e.g. "llama3.2".
temperature: Numeric value between 0 and 1 passed to the model. At 0, with a constant seed, the same prompt always returns exactly the same response; closer to 1, responses become more variable and creative. Use 0 for consistent responses; 0.7 is a common choice for creative or interactive tasks.
seed: An integer. When temperature is set to 0 and the seed is constant, the model consistently returns the same response to the same prompt.
host: The address where the Ollama API can be reached, e.g. "http://localhost:11434".
hash: Defaults to TRUE. If TRUE, adds a column with the hash of all other components of the prompt; used internally for caching. Can be added separately with ql_hash().
For more details and context about each parameter, see https://github.com/ollama/ollama/blob/main/docs/api.md.
A tibble with all main components of a query, to be passed to ql_generate().
ql_prompt("a haiku")
ql_prompt("a haiku")
Read images in order to pass them to multimodal models
ql_read_images(path)
path: Path to image file.
A list of character vectors of base64-encoded images.
if (interactive()) {
  library("quackingllama")

  img_path <- fs::file_temp(ext = "png")

  download.file(
    url = "https://ollama.com/public/ollama.png",
    destfile = img_path
  )

  resp_df <- ql_prompt(
    prompt = "what is this?",
    images = img_path,
    model = "llama3.2-vision"
  ) |>
    ql_generate()

  resp_df

  resp_df$response
}
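For reference, the reading step can also be run on its own; this sketch reuses img_path from the example above:

## Not run:
imgs <- ql_read_images(img_path)
length(imgs) # one base64-encoded element per image
## End(Not run)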
Create httr2 request for both generate and chat endpoints
ql_request(
  prompt_df,
  endpoint = "generate",
  host = NULL,
  message = NULL,
  timeout = NULL
)
endpoint: Defaults to "generate". Must be either "generate" or "chat".
host: The address where the Ollama API can be reached, e.g. "http://localhost:11434".
timeout: Time in seconds before the request times out. If not set here or with ql_set_options(), defaults to 300 (5 minutes).
An httr2 request object.
ql_prompt(prompt = "a haiku") ql_prompt(prompt = "a haiku") |> ql_request() |> httr2::req_dry_run()
ql_prompt(prompt = "a haiku") ql_prompt(prompt = "a haiku") |> ql_request() |> httr2::req_dry_run()
Set options for the local database and enable caching
ql_set_db_options(db_filename = NULL, db_type = "DuckDB", db_folder = ".")
db_filename: Defaults to NULL. If not given, a file name is built internally from a combination of current session settings.
db_type: Defaults to "DuckDB".
db_folder: Defaults to ".", i.e. the current working directory.
Nothing, used for its side effects.
Other database: ql_disable_db(), ql_enable_db()
ql_set_db_options(db_filename = "testing_ground")
ql_set_db_options(db_filename = "testing_ground")
Set basic options for the current session.
ql_set_options(
  system = NULL,
  model = NULL,
  host = NULL,
  temperature = NULL,
  seed = NULL,
  keep_alive = NULL,
  timeout = NULL
)
system: System message to pass to the model. See the official documentation for details. For example: "You are a helpful assistant."
model: The name of the model, e.g. "llama3.2".
host: The address where the Ollama API can be reached, e.g. "http://localhost:11434".
temperature: Numeric value between 0 and 1 passed to the model. At 0, with a constant seed, the same prompt always returns exactly the same response; closer to 1, responses become more variable and creative. Use 0 for consistent responses; 0.7 is a common choice for creative or interactive tasks.
seed: An integer. When temperature is set to 0 and the seed is constant, the model consistently returns the same response to the same prompt.
keep_alive: Defaults to "5m". Controls how long the model stays loaded in memory after the request.
timeout: Time in seconds before the request times out. Defaults to 300 (corresponding to 5 minutes).
Nothing, used for its side effects. Options can be retrieved with ql_get_options().
ql_set_options(
  model = "llama3.2",
  host = "http://localhost:11434",
  system = "You are a helpful assistant.",
  temperature = 0,
  seed = 42
)
ql_get_options()
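Single options can also be adjusted later without touching the rest, e.g. raising the timeout for slower models:

ql_set_options(timeout = 600) # allow up to 10 minutes per request
ql_get_options("timeout")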