14 changes: 7 additions & 7 deletions .github/workflows/ci.yaml
@@ -517,7 +517,7 @@ jobs:
            env.HA_SHORT_VERSION }}-
      - name: Install additional OS dependencies
        if: steps.cache-venv.outputs.cache-hit != 'true'
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -579,7 +579,7 @@ jobs:
      - base
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -879,7 +879,7 @@ jobs:
    name: Split tests for full run
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -940,7 +940,7 @@ jobs:
      Run tests Python ${{ matrix.python-version }} (${{ matrix.group }})
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -1074,7 +1074,7 @@ jobs:
      Run ${{ matrix.mariadb-group }} tests Python ${{ matrix.python-version }}
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -1215,7 +1215,7 @@ jobs:
      Run ${{ matrix.postgresql-group }} tests Python ${{ matrix.python-version }}
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
@@ -1377,7 +1377,7 @@ jobs:
      Run tests Python ${{ matrix.python-version }} (${{ matrix.group }})
    steps:
      - name: Install additional OS dependencies
-        timeout-minutes: 5
+        timeout-minutes: 10
        run: |
          sudo rm /etc/apt/sources.list.d/microsoft-prod.list
          sudo apt-get update
1 change: 0 additions & 1 deletion homeassistant/components/assist_satellite/intent.py
@@ -75,7 +75,6 @@ async def async_handle(self, intent_obj: intent.Intent) -> intent.IntentResponse
        )

        response = intent_obj.create_response()
-        response.response_type = intent.IntentResponseType.ACTION_DONE
        response.async_set_results(
            success_results=[
                intent.IntentResponseTarget(
2 changes: 1 addition & 1 deletion homeassistant/components/bluetooth/manifest.json
@@ -21,6 +21,6 @@
    "bluetooth-auto-recovery==1.5.2",
    "bluetooth-data-tools==1.28.2",
    "dbus-fast==2.44.3",
-    "habluetooth==5.3.1"
+    "habluetooth==5.5.1"
  ]
}
4 changes: 0 additions & 4 deletions homeassistant/components/bring/strings.json
@@ -164,10 +164,6 @@
      "name": "[%key:component::notify::services::notify::name%]",
      "description": "Sends a mobile push notification to members of a shared Bring! list.",
      "fields": {
-        "entity_id": {
-          "name": "List",
-          "description": "Bring! list whose members (except sender) will be notified."
-        },
        "message": {
          "name": "Notification type",
          "description": "Type of push notification to send to list members."
1 change: 0 additions & 1 deletion homeassistant/components/climate/intent.py
@@ -89,7 +89,6 @@ async def async_handle(self, intent_obj: intent.Intent) -> intent.IntentResponse
        )

        response = intent_obj.create_response()
-        response.response_type = intent.IntentResponseType.ACTION_DONE
        response.async_set_results(
            success_results=[
                intent.IntentResponseTarget(
1 change: 0 additions & 1 deletion homeassistant/components/conversation/default_agent.py
@@ -371,7 +371,6 @@ async def _async_handle_message(
            response = intent.IntentResponse(
                language=user_input.language or self.hass.config.language
            )
-            response.response_type = intent.IntentResponseType.ACTION_DONE
            response.async_set_speech(response_text)

        if response is None:
2 changes: 1 addition & 1 deletion homeassistant/components/eq3btsmart/manifest.json
@@ -22,5 +22,5 @@
  "integration_type": "device",
  "iot_class": "local_polling",
  "loggers": ["eq3btsmart"],
-  "requirements": ["eq3btsmart==2.1.0", "bleak-esphome==3.2.0"]
+  "requirements": ["eq3btsmart==2.1.0", "bleak-esphome==3.3.0"]
}
2 changes: 1 addition & 1 deletion homeassistant/components/esphome/manifest.json
@@ -19,7 +19,7 @@
  "requirements": [
    "aioesphomeapi==40.0.1",
    "esphome-dashboard-api==1.3.0",
-    "bleak-esphome==3.2.0"
+    "bleak-esphome==3.3.0"
  ],
  "zeroconf": ["_esphomelib._tcp.local."]
}
2 changes: 1 addition & 1 deletion homeassistant/components/google_cloud/strings.json
@@ -25,7 +25,7 @@
          "gain": "Default volume gain (in dB) of the voice",
          "profiles": "Default audio profiles",
          "text_type": "Default text type",
-          "stt_model": "STT model"
+          "stt_model": "Speech-to-Text model"
        }
      }
    }
homeassistant/components/google_generative_ai_conversation/__init__.py
@@ -30,7 +30,6 @@
    device_registry as dr,
    entity_registry as er,
)
-from homeassistant.helpers.issue_registry import IssueSeverity, async_create_issue
from homeassistant.helpers.typing import ConfigType

from .const import (
@@ -72,18 +71,6 @@ async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    async def generate_content(call: ServiceCall) -> ServiceResponse:
        """Generate content from text and optionally images."""

-        if call.data[CONF_IMAGE_FILENAME]:
-            # Deprecated in 2025.3, to remove in 2025.9
-            async_create_issue(
-                hass,
-                DOMAIN,
-                "deprecated_image_filename_parameter",
-                breaks_in_ha_version="2025.9.0",
-                is_fixable=False,
-                severity=IssueSeverity.WARNING,
-                translation_key="deprecated_image_filename_parameter",
-            )
-
        prompt_parts = [call.data[CONF_PROMPT]]

        config_entry: GoogleGenerativeAIConfigEntry = (
@@ -92,7 +79,7 @@ async def generate_content(call: ServiceCall) -> ServiceResponse:

        client = config_entry.runtime_data

-        files = call.data[CONF_IMAGE_FILENAME] + call.data[CONF_FILENAMES]
+        files = call.data[CONF_FILENAMES]

        if files:
            for filename in files:
@@ -140,9 +127,6 @@ async def generate_content(call: ServiceCall) -> ServiceResponse:
        schema=vol.Schema(
            {
                vol.Required(CONF_PROMPT): cv.string,
-                vol.Optional(CONF_IMAGE_FILENAME, default=[]): vol.All(
-                    cv.ensure_list, [cv.string]
-                ),
                vol.Optional(CONF_FILENAMES, default=[]): vol.All(
                    cv.ensure_list, [cv.string]
                ),
123 changes: 117 additions & 6 deletions homeassistant/components/google_generative_ai_conversation/ai_task.py
@@ -3,6 +3,10 @@
from __future__ import annotations

from json import JSONDecodeError
+from typing import TYPE_CHECKING

+from google.genai.errors import APIError
+from google.genai.types import GenerateContentConfig, Part, PartUnionDict
+
from homeassistant.components import ai_task, conversation
from homeassistant.config_entries import ConfigEntry
@@ -11,8 +15,17 @@
from homeassistant.helpers.entity_platform import AddConfigEntryEntitiesCallback
from homeassistant.util.json import json_loads

-from .const import LOGGER
-from .entity import ERROR_GETTING_RESPONSE, GoogleGenerativeAILLMBaseEntity
+from .const import CONF_CHAT_MODEL, CONF_RECOMMENDED, LOGGER, RECOMMENDED_IMAGE_MODEL
+from .entity import (
+    ERROR_GETTING_RESPONSE,
+    GoogleGenerativeAILLMBaseEntity,
+    async_prepare_files_for_prompt,
+)
+
+if TYPE_CHECKING:
+    from homeassistant.config_entries import ConfigSubentry
+
+    from . import GoogleGenerativeAIConfigEntry


async def async_setup_entry(
@@ -37,10 +50,22 @@ class GoogleGenerativeAITaskEntity(
):
    """Google Generative AI AI Task entity."""

-    _attr_supported_features = (
-        ai_task.AITaskEntityFeature.GENERATE_DATA
-        | ai_task.AITaskEntityFeature.SUPPORT_ATTACHMENTS
-    )
+    def __init__(
+        self,
+        entry: GoogleGenerativeAIConfigEntry,
+        subentry: ConfigSubentry,
+    ) -> None:
+        """Initialize the entity."""
+        super().__init__(entry, subentry)
+        self._attr_supported_features = (
+            ai_task.AITaskEntityFeature.GENERATE_DATA
+            | ai_task.AITaskEntityFeature.SUPPORT_ATTACHMENTS
+        )
+
+        if subentry.data.get(CONF_RECOMMENDED) or "-image" in subentry.data.get(
+            CONF_CHAT_MODEL, ""
+        ):
+            self._attr_supported_features |= ai_task.AITaskEntityFeature.GENERATE_IMAGE

    async def _async_generate_data(
        self,
@@ -79,3 +104,89 @@ async def _async_generate_data(
            conversation_id=chat_log.conversation_id,
            data=data,
        )
+
+    async def _async_generate_image(
+        self,
+        task: ai_task.GenImageTask,
+        chat_log: conversation.ChatLog,
+    ) -> ai_task.GenImageTaskResult:
+        """Handle a generate image task."""
+        # Get the user prompt from the chat log
+        user_message = chat_log.content[-1]
+        assert isinstance(user_message, conversation.UserContent)
+
+        model = self.subentry.data.get(CONF_CHAT_MODEL, RECOMMENDED_IMAGE_MODEL)
+        prompt_parts: list[PartUnionDict] = [user_message.content]
+        if user_message.attachments:
+            prompt_parts.extend(
+                await async_prepare_files_for_prompt(
+                    self.hass,
+                    self._genai_client,
+                    [a.path for a in user_message.attachments],
+                )
+            )
+
+        try:
+            response = await self._genai_client.aio.models.generate_content(
+                model=model,
+                contents=prompt_parts,
+                config=GenerateContentConfig(
+                    response_modalities=["TEXT", "IMAGE"],
+                ),
+            )
+        except (APIError, ValueError) as err:
+            LOGGER.error("Error generating image: %s", err)
+            raise HomeAssistantError(f"Error generating image: {err}") from err
+
+        if response.prompt_feedback:
+            raise HomeAssistantError(
+                f"Error generating content due to content violations, reason: {response.prompt_feedback.block_reason_message}"
+            )
+
+        if (
+            not response.candidates
+            or not response.candidates[0].content
+            or not response.candidates[0].content.parts
+        ):
+            raise HomeAssistantError("Unknown error generating image")
+
+        # Parse response
+        response_text = ""
+        response_image: Part | None = None
+        for part in response.candidates[0].content.parts:
+            if (
+                part.inline_data
+                and part.inline_data.data
+                and part.inline_data.mime_type
+                and part.inline_data.mime_type.startswith("image/")
+            ):
+                if response_image is None:
+                    response_image = part
+                else:
+                    LOGGER.warning("Prompt generated multiple images")
+            elif isinstance(part.text, str) and not part.thought:
+                response_text += part.text
+
+        if response_image is None:
+            raise HomeAssistantError("Response did not include image")
+
+        assert response_image.inline_data is not None
+        assert response_image.inline_data.data is not None
+        assert response_image.inline_data.mime_type is not None
+
+        image_data = response_image.inline_data.data
+        mime_type = response_image.inline_data.mime_type
+
+        chat_log.async_add_assistant_content_without_tools(
+            conversation.AssistantContent(
+                agent_id=self.entity_id,
+                content=response_text,
+            )
+        )
+
+        return ai_task.GenImageTaskResult(
+            image_data=image_data,
+            conversation_id=chat_log.conversation_id,
+            mime_type=mime_type,
+            model=model.partition("/")[-1],
+        )
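
For context, the new `_async_generate_image` method above is essentially a thin wrapper around the google-genai image-generation call it contains. Below is a minimal standalone sketch of that call pattern outside Home Assistant, assuming API-key authentication; the key, prompt, and output path are placeholders, and the model name is the one this PR adds to const.py.

```python
# Rough standalone sketch of the call pattern used by _async_generate_image.
# Assumptions: google-genai is installed and API-key auth is used; the key,
# prompt, and output path below are placeholders.
from google import genai
from google.genai.types import GenerateContentConfig

client = genai.Client(api_key="YOUR_API_KEY")
response = client.models.generate_content(
    model="models/gemini-2.5-flash-image-preview",
    contents=["A watercolor painting of a lighthouse at dusk"],
    config=GenerateContentConfig(response_modalities=["TEXT", "IMAGE"]),
)

# The response mixes text parts and image parts; image bytes arrive as inline_data.
for part in response.candidates[0].content.parts:
    if part.inline_data and (part.inline_data.mime_type or "").startswith("image/"):
        with open("generated.png", "wb") as out:
            out.write(part.inline_data.data)
    elif part.text:
        print(part.text)
```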
homeassistant/components/google_generative_ai_conversation/const.py
@@ -23,6 +23,7 @@
RECOMMENDED_CHAT_MODEL = "models/gemini-2.5-flash"
RECOMMENDED_STT_MODEL = RECOMMENDED_CHAT_MODEL
RECOMMENDED_TTS_MODEL = "models/gemini-2.5-flash-preview-tts"
+RECOMMENDED_IMAGE_MODEL = "models/gemini-2.5-flash-image-preview"
CONF_TEMPERATURE = "temperature"
RECOMMENDED_TEMPERATURE = 1.0
CONF_TOP_P = "top_p"
homeassistant/components/google_generative_ai_conversation/entity.py
@@ -448,12 +448,13 @@ async def _async_handle_chat_log(
        assert isinstance(user_message, conversation.UserContent)
        chat_request: list[PartUnionDict] = [user_message.content]
        if user_message.attachments:
-            files = await async_prepare_files_for_prompt(
-                self.hass,
-                self._genai_client,
-                [a.path for a in user_message.attachments],
+            chat_request.extend(
+                await async_prepare_files_for_prompt(
+                    self.hass,
+                    self._genai_client,
+                    [a.path for a in user_message.attachments],
+                )
            )
-            chat_request = [*chat_request, *files]

        # To prevent infinite loops, we limit the number of iterations
        for _iteration in range(MAX_TOOL_ITERATIONS):
homeassistant/components/google_generative_ai_conversation/services.yaml
@@ -5,10 +5,6 @@ generate_content:
      selector:
        text:
          multiline: true
-    image_filename:
-      required: false
-      selector:
-        object:
    filenames:
      required: false
      selector:
homeassistant/components/google_generative_ai_conversation/strings.json
@@ -160,23 +160,12 @@
          "description": "The prompt",
          "example": "Describe what you see in these images"
        },
-        "image_filename": {
-          "name": "Image filename",
-          "description": "Deprecated. Use filenames instead.",
-          "example": "/config/www/image.jpg"
-        },
        "filenames": {
          "name": "Attachment filenames",
          "description": "Attachments to add to the prompt (images, PDFs, etc)",
          "example": "/config/www/image.jpg"
        }
      }
    }
-  },
-  "issues": {
-    "deprecated_image_filename_parameter": {
-      "title": "Deprecated 'image_filename' parameter",
-      "description": "The 'image_filename' parameter in Google Generative AI actions is deprecated. Please edit scripts and automations to use 'filenames' instead."
-    }
  }
}
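
With the deprecated `image_filename` field removed above, callers pass attachments to `generate_content` only through `filenames`. A hedged sketch of a migrated call site follows, assuming it runs inside a loaded Home Assistant instance; the prompt and file path are placeholders.

```python
# Rough sketch of calling the action after this change: only `filenames` is
# accepted, and the removed `image_filename` field would now fail schema validation.
from homeassistant.core import HomeAssistant, ServiceResponse


async def describe_images(hass: HomeAssistant) -> ServiceResponse:
    """Call google_generative_ai_conversation.generate_content with attachments."""
    return await hass.services.async_call(
        "google_generative_ai_conversation",
        "generate_content",
        {
            "prompt": "Describe what you see in these images",
            "filenames": ["/config/www/image.jpg"],  # replaces image_filename
        },
        blocking=True,
        return_response=True,
    )
```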
2 changes: 1 addition & 1 deletion homeassistant/components/homee/manifest.json
@@ -8,7 +8,7 @@
  "iot_class": "local_push",
  "loggers": ["homee"],
  "quality_scale": "silver",
-  "requirements": ["pyHomee==1.2.10"],
+  "requirements": ["pyHomee==1.3.8"],
  "zeroconf": [
    {
      "type": "_ssh._tcp.local.",