
Commit

v0.3.3: gpt-4o-mini version
kumeS committed Feb 10, 2025
1 parent f3b9641 commit 5957508
Showing 7 changed files with 250 additions and 118 deletions.
7 changes: 4 additions & 3 deletions DESCRIPTION
@@ -1,8 +1,8 @@
Package: chatAI4R
Type: Package
Title: Chat-Based Interactive Artificial Intelligence for R
Version: 0.3.4
Date: 2025-02-03
Version: 0.3.5
Date: 2025-02-11
Authors@R: c(
person (given = "Satoshi", family = "Kume",
role = c("aut", "cre"),
@@ -30,7 +30,8 @@ Imports: httr,
pdftools,
xml2,
rvest,
curl
curl,
base64enc
Suggests: testthat, knitr
License: Artistic-2.0
URL: https://kumes.github.io/chatAI4R/, https://github.com/kumeS/chatAI4R
4 changes: 3 additions & 1 deletion NAMESPACE
@@ -43,13 +43,15 @@ export(speakInJA_v2)
export(summaryWebScrapingText)
export(supportIdeaGeneration)
export(textEmbedding)
export(textFileInput4ai)
export(vision4R)
importFrom(assertthat,assert_that)
importFrom(assertthat,is.count)
importFrom(assertthat,is.flag)
importFrom(assertthat,is.number)
importFrom(assertthat,is.string)
importFrom(assertthat,noNA)
importFrom(base64enc,base64encode)
importFrom(clipr,read_clip)
importFrom(clipr,write_clip)
importFrom(crayon,red)
@@ -67,7 +69,7 @@ importFrom(httr,GET)
importFrom(httr,POST)
importFrom(httr,add_headers)
importFrom(httr,content)
importFrom(httr,upload_file)
importFrom(httr,status_code)
importFrom(httr,write_stream)
importFrom(igraph,add_edges)
importFrom(igraph,add_vertices)
66 changes: 66 additions & 0 deletions R/textFileInput4ai.R
@@ -0,0 +1,66 @@
#' Send Text File Content to OpenAI API and Retrieve Response
#'
#' This function reads the content of a specified text file, sends it to the OpenAI API
#' using the provided API key, and retrieves the generated response from the GPT model.
#'
#' @param file_path A string representing the path to the text or CSV file to be read and sent to the API.
#' @param api_key A string containing the OpenAI API key. Defaults to the "OPENAI_API_KEY" environment variable.
#' @param model A string specifying the OpenAI model to be used (default is "gpt-4o-mini").
#' @param system_prompt Optional. A system-level instruction that can be used to guide the model's behavior
#' (default is "You are a helpful assistant to analyze your input.").
#' @param max_tokens A numeric value specifying the maximum number of tokens to generate (default is 50).
#'
#' @return A character string containing the response from the OpenAI API.
#' @importFrom httr POST add_headers content
#' @importFrom jsonlite toJSON
#' @author Satoshi Kume
#' @export textFileInput4ai
#' @examples
#' \dontrun{
#' # Example usage of the function
#' api_key <- "YOUR_OPENAI_API_KEY"
#' file_path <- "path/to/your/text_file.txt"
#' response <- textFileInput4ai(file_path, api_key = api_key, max_tokens = 50)
#' cat(response)
#' }

textFileInput4ai <- function(file_path,
model = "gpt-4o-mini",
system_prompt = "You are a helpful assistant to analyze your input.",
max_tokens = 50,
api_key = Sys.getenv("OPENAI_API_KEY") ) {
# Read the text content from the specified file
text_content <- paste(readLines(file_path, warn = FALSE), collapse = "\n")

# Define the OpenAI API endpoint URL
url <- "https://api.openai.com/v1/chat/completions"

# Define the request headers including the API key for authorization
headers <- c(
"Content-Type" = "application/json",
"Authorization" = paste("Bearer", api_key)
)

# Create the JSON-formatted request body with the model, system prompt, file content, and max_tokens
body <- jsonlite::toJSON(list(
model = model,
messages = list(
list(role = "system", content = system_prompt),
list(role = "user", content = text_content)
),
max_tokens = max_tokens
), auto_unbox = TRUE)

# Send a POST request to the OpenAI API with the specified headers and body
response <- httr::POST(url, httr::add_headers(.headers = headers), body = body, encode = "json")

# Parse the response content from the API
result <- httr::content(response, "parsed", encoding = "UTF-8")

# If the API returned valid choices, return the generated text; otherwise, throw an error
if (!is.null(result$choices) && length(result$choices) > 0) {
return(result$choices[[1]]$message$content)
} else {
stop("No response from OpenAI API. Check your API key and input data.")
}
}
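
For reference, a minimal usage sketch of the new helper; the file path, API key, and token budget below are placeholders rather than values defined by the package:

```r
# Illustrative only: summarize a local CSV file with the new helper.
# Replace the path and API key with your own values.
Sys.setenv(OPENAI_API_KEY = "YOUR_OPENAI_API_KEY")

res <- textFileInput4ai(
  file_path     = "data/example.csv",
  model         = "gpt-4o-mini",
  system_prompt = "Summarize the key points of the input in three sentences.",
  max_tokens    = 200
)
cat(res)
```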
149 changes: 74 additions & 75 deletions R/vision4R.R
@@ -1,107 +1,106 @@
#' Vision API Function using OpenAI's Vision API
#'
#' @title vision4R: Analyze an image using OpenAI's Vision API
#' @description This function uses the OpenAI Vision API to analyze an image provided by the user.
#' The function sends the image along with an optional prompt and system message to the API,
#' and returns the analysis result. The Vision API endpoint is assumed to be similar in structure
#' to other OpenAI endpoints, accepting multipart/form-data uploads.
#' @description This function sends a local image along with a text prompt to an OpenAI vision-capable chat model.
#' The function encodes the image in Base64 format and constructs a JSON payload in which the user's
#' message contains both text and an image URL (data URI), following the request format of the OpenAI chat completions API.
#'
#' @param image_path A string specifying the path to the image file to be analyzed.
#' @param prompt A string containing the analysis prompt for the Vision API (optional).
#' If left empty (i.e., prompt = ""), a notice will be output to remind the user to provide a prompt.
#' @param Model A string specifying the vision model to use. Currently supported models include "gpt-4-vision".
#' @param temperature A numeric value controlling the randomness of the model's output (default: 1).
#' @param system_set A string containing the system message to set the context for the analysis.
#' If provided, it will be included in the request. Default is an empty string.
#' @param api_key A string containing the user's OpenAI API key.
#' Defaults to the value of the environment variable "OPENAI_API_KEY".
#' @param image_path A string specifying the path to the image file. The image format should be png or jpeg.
#' @param user_prompt A string containing the text prompt. Default: "What is depicted in this image?".
#' @param Model The model to use. Defaults to "gpt-4o-mini". Allowed values: "gpt-4o-mini", "gpt-4o".
#' @param temperature A numeric value controlling the randomness of the output (default: 1).
#' @param api_key Your OpenAI API key. Default: environment variable `OPENAI_API_KEY`.
#'
#' @importFrom httr POST add_headers upload_file content
#' @importFrom jsonlite fromJSON
#'
#' @return A data frame containing the analysis result from the Vision API.
#' @importFrom base64enc base64encode
#' @importFrom jsonlite toJSON
#' @importFrom httr POST add_headers content status_code
#' @author Satoshi Kume
#' @export vision4R
#'
#' @return A data frame containing the model's response.
#' @examples
#' \dontrun{
#' Sys.setenv(OPENAI_API_KEY = "Your API key")
#'
#' # Analyze an image with a prompt and system context using a supported model "gpt-4-vision"
#' result <- vision4R(
#' image_path = "path/to/your/image.jpg",
#' prompt = "Describe the scene and identify any objects.",
#' system_set = "You are an expert image analyst.",
#' Model = "gpt-4-vision",
#' temperature = 0.8
#' )
#' print(result)
#'
#' # If prompt is empty, a notice will be output
#' result <- vision4R(
#' image_path = "path/to/your/image.jpg",
#' prompt = "",
#' Model = "gpt-4-vision"
#' )
#' # Example usage of the function
#' api_key <- "YOUR_OPENAI_API_KEY"
#' image_path <- "path/to/your/image.png"
#' vision4R(image_path = image_path, api_key = api_key)
#' }
#'

vision4R <- function(image_path,
prompt = "",
Model = "gpt-4-vision",
user_prompt = "What is depicted in this image?",
Model = "gpt-4o-mini",
temperature = 1,
system_set = "",
api_key = Sys.getenv("OPENAI_API_KEY")) {

# Define the vector of allowed models.
allowed_models <- c("gpt-4-vision")

# Validate that the Model argument is one of the allowed models.
if (!any(Model == allowed_models)) {
stop(paste("Invalid model. The vision4R function only supports the following models:",
paste(allowed_models, collapse = ", ")))
# Validate if API key is available
if (api_key == "") {
stop("Error: API key is missing. Please set the 'OPENAI_API_KEY' environment variable or pass it as an argument.")
}

# Check if prompt is empty and output a notice if it is.
if (prompt == "") {
message("NOTE: The prompt is empty. Please provide a prompt for more accurate analysis.")
# Validate model selection
allowed_models <- c("gpt-4o-mini", "gpt-4o")
if (!Model %in% allowed_models) {
stop("Invalid model. The vision4R function only supports the following models: ",
paste(allowed_models, collapse = ", "))
}

# Define the Vision API endpoint.
api_url <- "https://api.openai.com/v1/vision/completions"
# Encode the image in Base64 format
base64_image <- base64enc::base64encode(image_path)
mime_type <- ifelse(grepl("\\.png$", image_path, ignore.case = TRUE), "image/png", "image/jpeg")
data_uri <- paste0("data:", mime_type, ";base64,", base64_image)

# Construct the body for the API request using multipart/form-data.
# Construct JSON payload with a single message having a content list of two elements
body <- list(
model = Model,
prompt = prompt,
temperature = temperature,
file = httr::upload_file(image_path)
messages = list(
list(
role = "user",
content = list(
list(
type = "text",
text = user_prompt
),
list(
type = "image_url",
image_url = list(
url = data_uri
)
)
)
)
),
temperature = temperature
)

# Include system_set if provided.
if (nzchar(system_set)) {
body$system <- system_set
}

# Configure headers for the API request (Authorization header).
headers <- httr::add_headers(
`Authorization` = paste("Bearer", api_key)
)
# Convert payload to JSON
json_payload <- jsonlite::toJSON(body, auto_unbox = TRUE, pretty = TRUE)

# Send the POST request to the OpenAI Vision API.
# Send POST request to OpenAI Vision API
response <- httr::POST(
url = api_url,
body = body,
encode = "multipart",
config = headers
url = "https://api.openai.com/v1/chat/completions",
body = json_payload,
encode = "json",
httr::add_headers(
`Authorization` = paste("Bearer", api_key),
`Content-Type` = "application/json"
)
)

# Extract and parse the response content.
parsed_content <- httr::content(response, "parsed")
# Check the response status
if (httr::status_code(response) != 200) {
stop("Error: Failed to retrieve a response from OpenAI API. Status code: ",
httr::status_code(response))
}

# Parse the response content
parsed_content <- httr::content(response, as = "parsed", type = "application/json")
#parsed_content$choices[[1]]$message$content

# Process and return the result.
# This example assumes the API response contains a 'choices' element with the analysis result.
# Return the result
if (!is.null(parsed_content$choices) && length(parsed_content$choices) > 0) {
return(data.frame(content = parsed_content$choices[[1]]$message$content, stringsAsFactors = FALSE))
return(data.frame(content = parsed_content$choices[[1]]$message$content,
stringsAsFactors = FALSE))
} else {
return(parsed_content)
stop("Error: Unexpected response format or no results returned.")
}
}
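
A minimal usage sketch of the rewritten function; the image path and prompt are placeholders, and the return value is a one-row data frame with a `content` column:

```r
# Illustrative only: describe a local PNG with the default gpt-4o-mini model.
Sys.setenv(OPENAI_API_KEY = "YOUR_OPENAI_API_KEY")

result <- vision4R(
  image_path  = "images/example_plot.png",
  user_prompt = "Describe the main objects in this image.",
  Model       = "gpt-4o-mini",
  temperature = 0.5
)
print(result$content)
```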
39 changes: 39 additions & 0 deletions inst/vignettes/WebSearch_plus_LLM_plus_SNS.Rmd
@@ -66,6 +66,14 @@ url <- clipr::read_clip()
body_content <- extract_and_decode_main_content(url)
print(body_content)
# Pass a URL copied to the clipboard and write the extracted text back to the clipboard
library(magrittr)
clipr::read_clip() %>%
extract_and_decode_main_content() %>%
clipr::write_clip()
```

## Step 2: Generate and Fine-Tune the Draft
@@ -99,6 +107,37 @@ res <- clipr::read_clip() %>%
temperature = 1,
system_set = system_set
)
```

# Translate into Your Language

```{r echo=TRUE, eval=FALSE}
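# Sketch only: translate the drafted text on the clipboard via the
# chat completions endpoint; the model and prompt wording below are
# illustrative choices, not fixed by the vignette.
library(httr)
library(jsonlite)

draft <- paste(clipr::read_clip(), collapse = "\n")

body <- jsonlite::toJSON(list(
  model = "gpt-4o-mini",
  messages = list(
    list(role = "system",
         content = "Translate the user's text into Japanese, keeping the tone."),
    list(role = "user", content = draft)
  )
), auto_unbox = TRUE)

res <- httr::POST(
  "https://api.openai.com/v1/chat/completions",
  httr::add_headers(
    Authorization  = paste("Bearer", Sys.getenv("OPENAI_API_KEY")),
    `Content-Type` = "application/json"
  ),
  body = body,
  encode = "json"
)

translated <- httr::content(res, "parsed")$choices[[1]]$message$content

# Put the translation back on the clipboard for the next step
clipr::write_clip(translated)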
```

# Posting to Twitter

Go to the Twitter (X) Developer Portal and register an account.

https://developer.x.com/en/portal/dashboard

```{r echo=TRUE, eval=FALSE}
# Load the package
#install.packages("rtweet")
library(rtweet)
client_list()
rtweet::auth_as()
# Post a tweet
tweet_post(text = "Post something")
tweet_search_recent(q = "tweet")
# Open the Twitter site to confirm the post
browseURL("https://twitter.com/home?lang=ja")
```
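
If the finished draft is on the clipboard, it can be posted with the session configured above; the 280-character trim is an assumption for a standard account:

```{r echo=TRUE, eval=FALSE}
# Sketch only: post the clipboard draft using the rtweet session set up above.
draft <- paste(clipr::read_clip(), collapse = "\n")

# Standard accounts are limited to 280 characters (assumption), so trim if needed.
if (nchar(draft) > 280) {
  draft <- substr(draft, 1, 280)
}

tweet_post(text = draft)
```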

# Conclusion
45 changes: 45 additions & 0 deletions man/textFileInput4ai.Rd

Some generated files are not rendered by default.
