-
Notifications
You must be signed in to change notification settings - Fork 1
/
palm.txt.R
188 lines (183 loc) · 7.7 KB
/
palm.txt.R
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
#' Generate text using the Google PaLM 2 text model based on a prompt
#'
#' This function sends a prompt to the Google PaLM 2 text model and generates text as a response. It allows customization of the
#' generated text using various parameters.
#'
#' @param model.parameter A character vector containing the API key, model version, and proxy status. Model version and type
#' are specified by Google. See function \code{\link{palm.connect}} for detail.
#' @param prompt A character string representing the query or prompt for text generation. The length of the query should
#' be between 1 and 8196 characters, inclusive.
#' @param temperature A numeric value between 0.0 and 1.0, inclusive (default: 0.7). Controls the randomness of the generated text.
#' A higher value (e.g., 0.9) results in more creative responses, while a lower value (e.g., 0.3) produces more
#' straightforward text.
#' @param maxOutputTokens An integer value (default: 1024). Specifies the maximum number of tokens to
#' include in the generated text.
#' @param topP A numeric value (default: 0.95). Defines the maximum cumulative probability of tokens
#' considered when sampling. It controls the diversity of the text generated.
#' @param topK An integer value (default: 40). Sets the maximum number of tokens to consider
#' when sampling.
#' @param htUnspecified Safety setting threshold for unspecified harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htDerogatory Safety setting threshold for derogatory harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htToxicity Safety setting threshold for toxicity harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htViolence Safety setting threshold for violence harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htSexual Safety setting threshold for sexual harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htMedical Safety setting threshold for medical harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @param htDangerous Safety setting threshold for dangerous harm. The default threshold is "meda". Valid options are as follows.
#'
#' \describe{
#' \item{"unsp"}{HARM_BLOCK_THRESHOLD_UNSPECIFIED}
#' \item{"lowa"}{BLOCK_LOW_AND_ABOVE}
#' \item{"meda"}{BLOCK_MEDIUM_AND_ABOVE}
#' \item{"high"}{BLOCK_ONLY_HIGH}
#' \item{"none"}{BLOCK_NONE}
#' }
#'
#' @return A character string generated by the Google PaLM 2 API based on the provided prompt and parameters.
#'
#' @details
#' This function interacts with the Google PaLM model by sending a query using the specified parameters. It allows you
#' to customize the generated text by adjusting the `temperature`, `maxOutputTokens`, `topP`, `topK`, and safety settings.
#'
#' If the function is successful, it returns a character string containing the generated text. If an error occurs during
#' the API request, it will stop execution and provide an error message.
#'
#' The `model.parameter` argument should be a character vector with the API key, model version, and model type provided by
#' Google. You can obtain this information by following the instructions provided by Google for using the PaLM API.
#'
#' The safety settings control the content's safety level based on different harm categories. Harm thresholds are
#' specified as per Google's guidelines and can be customized to control the content generated.
#'
#' @examples
#' \dontrun{
#' # Connect to the model, replace API_KEY with your api key
#' palm.model = palm.connect("v1beta2",
#'                           "API_KEY",
#'                           FALSE)
#'
#' prompt = "Write a story about a magic backpack."
#' generated.text = palm.txt(palm.model,
#'                           prompt)
#' cat(generated.text)
#' }
#'
#' @seealso
#' \href{https://palmr.ly.gd.edu.kg/documentation/}{PaLMr - Documentation}
#'
#' \href{https://ai.google.dev/api/rest/v1beta/SafetySetting}{Safety Setting - Google AI for Developers}
#'
#' \href{https://ai.google.dev/api/rest/v1beta/HarmCategory}{HarmCategory - Google AI for Developers}
#'
#' @export
#'
#' @importFrom jsonlite toJSON
#' @importFrom httr GET POST add_headers content
palm.txt = function(model.parameter,
                    prompt,
                    temperature = 0.7,
                    maxOutputTokens = 1024,
                    topP = 0.95,
                    topK = 40,
                    htUnspecified = "meda",
                    htDerogatory = "meda",
                    htToxicity = "meda",
                    htViolence = "meda",
                    htSexual = "meda",
                    htMedical = "meda",
                    htDangerous = "meda") {
  # Only the v1beta2 / v1beta3 API versions are supported; anything else
  # aborts here with an informative match.arg() error.
  model.parameter["version"] <- match.arg(model.parameter["version"],
                                          c("v1beta2", "v1beta3"),
                                          several.ok = FALSE)

  # model.parameter is a character vector (see palm.connect), so the proxy
  # flag arrives as the string "TRUE"/"FALSE"; coerce it explicitly instead
  # of relying on ifelse()'s silent coercion of a scalar condition.
  use.proxy <- isTRUE(as.logical(model.parameter["proxy"]))

  # Route the request through the mirror host when the proxy flag is set;
  # both endpoints share the same path and query structure.
  base.url <- if (use.proxy) {
    "https://api.genai.gd.edu.kg/google/"
  } else {
    "https://generativelanguage.googleapis.com/"
  }
  apiURL <- paste0(base.url,
                   model.parameter["version"],
                   "/models/text-bison-001",
                   ":generateText?key=",
                   model.parameter["api"])

  # Assemble the request payload expected by the generateText endpoint.
  # maxOutputTokens and topK must be integers so toJSON() does not emit
  # them as doubles (e.g. 1024 instead of 1024.0).
  requestBody <- list(
    prompt = list(
      text = prompt
    ),
    safetySettings = generateSafetySettings(htUnspecified,
                                            htDerogatory,
                                            htToxicity,
                                            htViolence,
                                            htSexual,
                                            htMedical,
                                            htDangerous),
    temperature = temperature,
    maxOutputTokens = as.integer(maxOutputTokens),
    topP = topP,
    topK = as.integer(topK)
  )

  # auto_unbox = TRUE collapses length-1 vectors to JSON scalars, which the
  # API requires (e.g. "text": "..." rather than "text": ["..."]).
  requestBodyJSON <- jsonlite::toJSON(requestBody, auto_unbox = TRUE)

  response <- httr::POST(url = apiURL,
                         body = requestBodyJSON,
                         httr::add_headers("Content-Type" = "application/json"))
  responseJSON <- httr::content(response, "parsed")

  # generateOutput() (package-internal helper) extracts the generated text
  # or stops with the API's error message.
  generateOutput(responseJSON)
}