diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/java/LiveSessionFutures.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/java/LiveSessionFutures.kt
index b15e0522591..b37a4ce8c2b 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/java/LiveSessionFutures.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/java/LiveSessionFutures.kt
@@ -197,9 +197,12 @@ public abstract class LiveSessionFutures internal constructor() {
   public abstract fun sendVideoRealtime(video: InlineData): ListenableFuture<Unit>
 
   /**
-   * Sends text data to the server in realtime. Check
-   * https://ai.google.dev/api/live#bidigeneratecontentrealtimeinput for details about the realtime
-   * input usage.
+   * For details about the realtime input usage, see the `BidiGenerateContentRealtimeInput`
+   * documentation (
+   * [Gemini Developer API](https://ai.google.dev/api/live#bidigeneratecontentrealtimeinput) or
+   * [Vertex AI Gemini API](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/model-reference/multimodal-live#bidigeneratecontentrealtimeinput)
+   * ).
+   *
    * @param text The text data to send.
    */
   public abstract fun sendTextRealtime(text: String): ListenableFuture<Unit>
@@ -211,7 +214,7 @@ public abstract class LiveSessionFutures internal constructor() {
    *
    * @param mediaChunks The list of [MediaData] instances representing the media data to be sent.
    */
-  @Deprecated("Use sendAudioRealtime, sendVideoRealtime, or sendTextRealtime instead")
+  @Deprecated("Use `sendAudioRealtime`, `sendVideoRealtime`, or `sendTextRealtime` instead")
   public abstract fun sendMediaStream(mediaChunks: List<MediaData>): ListenableFuture<Unit>
 
   /**
diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/Candidate.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/Candidate.kt
index 5b0c57ce61a..e14d768dac8 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/Candidate.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/Candidate.kt
@@ -84,7 +84,7 @@ internal constructor(
  *
  * The rating will be restricted to a particular [category].
  *
- * @property category The category of harm being assessed (e.g., Hate speech).
+ * @property category The category of harm being assessed (for example, Hate speech).
  * @property probability The likelihood of the content causing harm.
  * @property probabilityScore A numerical score representing the probability of harm, between `0`
  * and `1`.
diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ContentModality.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ContentModality.kt
index bfdf8831a43..e4551a25a74 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ContentModality.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ContentModality.kt
@@ -71,7 +71,7 @@ public class ContentModality private constructor(public val ordinal: Int) {
     /** Audio. */
     @JvmField public val AUDIO: ContentModality = ContentModality(4)
 
-    /** Document, e.g. PDF. */
+    /** Document (for example, PDF). */
     @JvmField public val DOCUMENT: ContentModality = ContentModality(5)
   }
 }
diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/LiveSession.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/LiveSession.kt
index ea5daee30f3..f332a67aba3 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/LiveSession.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/LiveSession.kt
@@ -342,7 +342,7 @@ internal constructor(
    *
    * @param video Encoded image data extracted from a frame of the video, used to update the model
    * on the client's conversation, with the corresponding IANA standard MIME type of the video frame
-   * data (e.g., `image/png`, `image/jpeg`, etc.).
+   * data (for example, `image/png`, `image/jpeg`, etc.).
    */
   public suspend fun sendVideoRealtime(video: InlineData) {
     FirebaseAIException.catchAsync {
diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/MediaData.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/MediaData.kt
index 7647c687934..237cf72e599 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/MediaData.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/MediaData.kt
@@ -27,7 +27,7 @@ import kotlinx.serialization.Serializable
  * [Firebase documentation](https://firebase.google.com/docs/vertex-ai/input-file-requirements).
  */
 @PublicPreviewAPI
-@Deprecated("Use InlineData instead", ReplaceWith("InlineData"))
+@Deprecated("Use `InlineData` instead", ReplaceWith("InlineData"))
 public class MediaData(public val data: ByteArray, public val mimeType: String) {
   @Serializable
   internal class Internal(
diff --git a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ResponseModality.kt b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ResponseModality.kt
index 4c1586227a2..8c2fa701894 100644
--- a/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ResponseModality.kt
+++ b/firebase-ai/src/main/kotlin/com/google/firebase/ai/type/ResponseModality.kt
@@ -20,7 +20,7 @@ import com.google.firebase.ai.common.util.FirstOrdinalSerializer
 import kotlinx.serialization.KSerializer
 import kotlinx.serialization.Serializable
 
-/** Represents the type of content present in a response (e.g., text, image, audio). */
+/** Represents the type of content present in a response (for example, text, image, audio). */
 public class ResponseModality private constructor(public val ordinal: Int) {
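For reviewers who want to see the migration the new deprecation messages point at, here is a minimal Kotlin sketch; it is not part of the diff above. It assumes a connected `LiveSession` named `session`, that `sendAudioRealtime` accepts `InlineData` the same way `sendVideoRealtime` does (its signature is not shown in this change), and that the `audio/pcm` / `image/jpeg` payloads and the `streamRealtimeInput` helper are purely illustrative.

```kotlin
import com.google.firebase.ai.type.InlineData
import com.google.firebase.ai.type.LiveSession

// Hypothetical helper: replaces a single deprecated sendMediaStream call with the
// per-modality realtime methods named in the @Deprecated messages above.
suspend fun streamRealtimeInput(session: LiveSession, pcmChunk: ByteArray, jpegFrame: ByteArray) {
  // Previously: session.sendMediaStream(listOf(MediaData(pcmChunk, "audio/pcm")))  // now deprecated
  session.sendAudioRealtime(InlineData(pcmChunk, "audio/pcm"))   // assumption: InlineData overload
  session.sendVideoRealtime(InlineData(jpegFrame, "image/jpeg")) // MIME type per the updated KDoc
  session.sendTextRealtime("Describe what is happening in this frame.")
}
```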