diff --git a/scaleway-async/scaleway_async/kafka/__init__.py b/scaleway-async/scaleway_async/kafka/__init__.py new file mode 100644 index 000000000..8b74a5ed7 --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/__init__.py @@ -0,0 +1,2 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. diff --git a/scaleway-async/scaleway_async/kafka/v1alpha1/__init__.py b/scaleway-async/scaleway_async/kafka/v1alpha1/__init__.py new file mode 100644 index 000000000..ae2f05903 --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/v1alpha1/__init__.py @@ -0,0 +1,91 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from .types import ClusterStatus +from .content import CLUSTER_TRANSIENT_STATUSES +from .types import ListClustersRequestOrderBy +from .types import ListUsersRequestOrderBy +from .types import NodeTypeStock +from .types import VolumeType +from .types import EndpointPrivateNetworkDetails +from .types import EndpointPublicDetails +from .types import VersionAvailableSettingBooleanProperty +from .types import VersionAvailableSettingFloatProperty +from .types import VersionAvailableSettingIntegerProperty +from .types import VersionAvailableSettingStringProperty +from .types import EndpointSpecPrivateNetworkDetails +from .types import EndpointSpecPublicDetails +from .types import ClusterSetting +from .types import Endpoint +from .types import Volume +from .types import NodeTypeVolumeType +from .types import VersionAvailableSetting +from .types import CreateClusterRequestVolumeSpec +from .types import EndpointSpec +from .types import Cluster +from .types import NodeType +from .types import User +from .types import Version +from .types import CreateClusterRequest +from .types import CreateEndpointRequest +from .types import DeleteClusterRequest +from .types import DeleteEndpointRequest +from .types import GetClusterCertificateAuthorityRequest +from .types import GetClusterRequest +from .types import ListClustersRequest +from .types import ListClustersResponse +from .types import ListNodeTypesRequest +from .types import ListNodeTypesResponse +from .types import ListUsersRequest +from .types import ListUsersResponse +from .types import ListVersionsRequest +from .types import ListVersionsResponse +from .types import RenewClusterCertificateAuthorityRequest +from .types import UpdateClusterRequest +from .types import UpdateUserRequest +from .api import KafkaV1Alpha1API + +__all__ = [ + "ClusterStatus", + "CLUSTER_TRANSIENT_STATUSES", + "ListClustersRequestOrderBy", + "ListUsersRequestOrderBy", + "NodeTypeStock", + "VolumeType", + "EndpointPrivateNetworkDetails", + "EndpointPublicDetails", + "VersionAvailableSettingBooleanProperty", + "VersionAvailableSettingFloatProperty", + "VersionAvailableSettingIntegerProperty", + "VersionAvailableSettingStringProperty", + "EndpointSpecPrivateNetworkDetails", + "EndpointSpecPublicDetails", + "ClusterSetting", + "Endpoint", + "Volume", + "NodeTypeVolumeType", + "VersionAvailableSetting", + "CreateClusterRequestVolumeSpec", + "EndpointSpec", + "Cluster", + "NodeType", + "User", + "Version", + "CreateClusterRequest", + "CreateEndpointRequest", + "DeleteClusterRequest", + "DeleteEndpointRequest", + "GetClusterCertificateAuthorityRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "ListNodeTypesRequest", + "ListNodeTypesResponse", + "ListUsersRequest", + "ListUsersResponse", + 
"ListVersionsRequest", + "ListVersionsResponse", + "RenewClusterCertificateAuthorityRequest", + "UpdateClusterRequest", + "UpdateUserRequest", + "KafkaV1Alpha1API", +] diff --git a/scaleway-async/scaleway_async/kafka/v1alpha1/api.py b/scaleway-async/scaleway_async/kafka/v1alpha1/api.py new file mode 100644 index 000000000..8e1693263 --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/v1alpha1/api.py @@ -0,0 +1,808 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import Awaitable, Optional, Union + +from scaleway_core.api import API +from scaleway_core.bridge import ( + Region as ScwRegion, + ScwFile, + unmarshal_ScwFile, +) +from scaleway_core.utils import ( + WaitForOptions, + random_name, + validate_path_param, + fetch_all_pages_async, + wait_for_resource_async, +) +from .types import ( + ListClustersRequestOrderBy, + ListUsersRequestOrderBy, + Cluster, + CreateClusterRequest, + CreateClusterRequestVolumeSpec, + CreateEndpointRequest, + Endpoint, + EndpointSpec, + ListClustersResponse, + ListNodeTypesResponse, + ListUsersResponse, + ListVersionsResponse, + NodeType, + UpdateClusterRequest, + UpdateUserRequest, + User, + Version, +) +from .content import ( + CLUSTER_TRANSIENT_STATUSES, +) +from .marshalling import ( + unmarshal_Endpoint, + unmarshal_Cluster, + unmarshal_User, + unmarshal_ListClustersResponse, + unmarshal_ListNodeTypesResponse, + unmarshal_ListUsersResponse, + unmarshal_ListVersionsResponse, + marshal_CreateClusterRequest, + marshal_CreateEndpointRequest, + marshal_UpdateClusterRequest, + marshal_UpdateUserRequest, +) + + +class KafkaV1Alpha1API(API): + """ + This API allows you to manage your Clusters for Apache Kafka®. This product is currently in Private Beta. + """ + + async def list_node_types( + self, + *, + region: Optional[ScwRegion] = None, + include_disabled_types: Optional[bool] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListNodeTypesResponse: + """ + List available node types. + :param region: Region to target. If none is passed will use default region from the config. + :param include_disabled_types: Defines whether or not to include disabled types. + :param page: + :param page_size: + :return: :class:`ListNodeTypesResponse ` + + Usage: + :: + + result = await api.list_node_types() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/node-types", + params={ + "include_disabled_types": include_disabled_types, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListNodeTypesResponse(res.json()) + + async def list_node_types_all( + self, + *, + region: Optional[ScwRegion] = None, + include_disabled_types: Optional[bool] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[NodeType]: + """ + List available node types. + :param region: Region to target. If none is passed will use default region from the config. + :param include_disabled_types: Defines whether or not to include disabled types. 
+ :param page: + :param page_size: + :return: :class:`list[NodeType] ` + + Usage: + :: + + result = await api.list_node_types_all() + """ + + return await fetch_all_pages_async( + type=ListNodeTypesResponse, + key="node_types", + fetcher=self.list_node_types, + args={ + "region": region, + "include_disabled_types": include_disabled_types, + "page": page, + "page_size": page_size, + }, + ) + + async def list_versions( + self, + *, + region: Optional[ScwRegion] = None, + version: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListVersionsResponse: + """ + List Kafka versions. + List all available versions of Kafka at the current time. + :param region: Region to target. If none is passed will use default region from the config. + :param version: Kafka version to filter for. + :param page: The page number to return, from the paginated results. + :param page_size: The number of items to return. + :return: :class:`ListVersionsResponse ` + + Usage: + :: + + result = await api.list_versions() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/versions", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "version": version, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListVersionsResponse(res.json()) + + async def list_versions_all( + self, + *, + region: Optional[ScwRegion] = None, + version: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[Version]: + """ + List Kafka versions. + List all available versions of Kafka at the current time. + :param region: Region to target. If none is passed will use default region from the config. + :param version: Kafka version to filter for. + :param page: The page number to return, from the paginated results. + :param page_size: The number of items to return. + :return: :class:`list[Version] ` + + Usage: + :: + + result = await api.list_versions_all() + """ + + return await fetch_all_pages_async( + type=ListVersionsResponse, + key="versions", + fetcher=self.list_versions, + args={ + "region": region, + "version": version, + "page": page, + "page_size": page_size, + }, + ) + + async def list_clusters( + self, + *, + region: Optional[ScwRegion] = None, + tags: Optional[list[str]] = None, + name: Optional[str] = None, + order_by: Optional[ListClustersRequestOrderBy] = None, + organization_id: Optional[str] = None, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListClustersResponse: + """ + List Kafka clusters. + List all Kafka clusters in the specified region. By default, the clusters returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. You can define additional parameters for your query, such as `tags` and `name`. For the `name` parameter, the value you include will be checked against the whole name string to see if it includes the string you put in the parameter. + :param region: Region to target. If none is passed will use default region from the config. + :param tags: Lists Kafka clusters with a given tag. + :param name: Lists Kafka clusters that match a name pattern. + :param order_by: Criteria to use when ordering Kafka cluster listings. + :param organization_id: Organization ID of the Kafka cluster. + :param project_id: Project ID.
+ :param page: + :param page_size: + :return: :class:`ListClustersResponse ` + + Usage: + :: + + result = await api.list_clusters() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters", + params={ + "name": name, + "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "tags": tags, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListClustersResponse(res.json()) + + async def list_clusters_all( + self, + *, + region: Optional[ScwRegion] = None, + tags: Optional[list[str]] = None, + name: Optional[str] = None, + order_by: Optional[ListClustersRequestOrderBy] = None, + organization_id: Optional[str] = None, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[Cluster]: + """ + List Kafka clusters. + List all Kafka clusters in the specified region. By default, the clusters returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. You can define additional parameters for your query, such as `tags` and `name`. For the `name` parameter, the value you include will be checked against the whole name string to see if it includes the string you put in the parameter. + :param region: Region to target. If none is passed will use default region from the config. + :param tags: Lists Kafka clusters with a given tag. + :param name: Lists Kafka clusters that match a name pattern. + :param order_by: Criteria to use when ordering Kafka cluster listings. + :param organization_id: Organization ID of the Kafka cluster. + :param project_id: Project ID. + :param page: + :param page_size: + :return: :class:`list[Cluster] ` + + Usage: + :: + + result = await api.list_clusters_all() + """ + + return await fetch_all_pages_async( + type=ListClustersResponse, + key="clusters", + fetcher=self.list_clusters, + args={ + "region": region, + "tags": tags, + "name": name, + "order_by": order_by, + "organization_id": organization_id, + "project_id": project_id, + "page": page, + "page_size": page_size, + }, + ) + + async def get_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + ) -> Cluster: + """ + Get a Kafka cluster. + Retrieve information about a given Kafka cluster, specified by the `region` and `cluster_id` parameters. Its full details, including name, status, IP address and port, are returned in the response object. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Cluster ` + + Usage: + :: + + result = await api.get_cluster( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}", + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + async def wait_for_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + options: Optional[WaitForOptions[Cluster, Union[bool, Awaitable[bool]]]] = None, + ) -> Cluster: + """ + Wait for a Kafka cluster to reach a stable status.
+ Poll a given Kafka cluster, specified by the `region` and `cluster_id` parameters, until it is no longer in a transient status, then return it. The stop condition can be customized via the `options` parameter. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Cluster ` + + Usage: + :: + + result = await api.wait_for_cluster( + cluster_id="example", + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in CLUSTER_TRANSIENT_STATUSES + + return await wait_for_resource_async( + fetcher=self.get_cluster, + options=options, + args={ + "cluster_id": cluster_id, + "region": region, + }, + ) + + async def create_cluster( + self, + *, + version: str, + node_amount: int, + node_type: str, + region: Optional[ScwRegion] = None, + project_id: Optional[str] = None, + name: Optional[str] = None, + tags: Optional[list[str]] = None, + volume: Optional[CreateClusterRequestVolumeSpec] = None, + endpoints: Optional[list[EndpointSpec]] = None, + user_name: Optional[str] = None, + password: Optional[str] = None, + ) -> Cluster: + """ + Create a Kafka cluster. + Create a new Kafka cluster. + :param version: Version of Kafka. + :param node_amount: Number of nodes to use for the Kafka cluster. + :param node_type: Type of node to use for the Kafka cluster. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: The ID of the Project in which the Kafka cluster will be created. + :param name: Name of the Kafka cluster. + :param tags: Tags to apply to the Kafka cluster. + :param volume: Kafka volume information. + :param endpoints: One or multiple EndpointSpec used to expose your Kafka cluster. + :param user_name: Username for the Kafka user. + :param password: Password for the Kafka user. + :return: :class:`Cluster ` + + Usage: + :: + + result = await api.create_cluster( + version="example", + node_amount=1, + node_type="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/kafka/v1alpha1/regions/{param_region}/clusters", + body=marshal_CreateClusterRequest( + CreateClusterRequest( + version=version, + node_amount=node_amount, + node_type=node_type, + region=region, + project_id=project_id, + name=name or random_name(prefix="kafk"), + tags=tags, + volume=volume, + endpoints=endpoints, + user_name=user_name, + password=password, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + async def update_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + name: Optional[str] = None, + tags: Optional[list[str]] = None, + ) -> Cluster: + """ + Update a Kafka cluster. + Update the parameters of a Kafka cluster. + :param cluster_id: UUID of the Kafka Cluster to update. + :param region: Region to target. If none is passed will use default region from the config. + :param name: Name of the Kafka Cluster. + :param tags: Tags of a Kafka Cluster.
+ :return: :class:`Cluster ` + + Usage: + :: + + result = await api.update_cluster( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "PATCH", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}", + body=marshal_UpdateClusterRequest( + UpdateClusterRequest( + cluster_id=cluster_id, + region=region, + name=name, + tags=tags, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + async def delete_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + ) -> Cluster: + """ + Delete a Kafka cluster. + Delete a given Kafka cluster, specified by the `region` and `cluster_id` parameters. Deleting a Kafka cluster is permanent, and cannot be undone. Note that upon deletion all your data will be lost. + :param cluster_id: UUID of the Kafka Cluster to delete. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Cluster ` + + Usage: + :: + + result = await api.delete_cluster( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "DELETE", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}", + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + async def get_cluster_certificate_authority( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + ) -> ScwFile: + """ + Get a Kafka cluster's certificate authority. + Retrieve the certificate authority for a given Kafka cluster, specified by the `region` and `cluster_id` parameters. The response object contains the certificate in PEM format. The certificate is required to validate the server from the client side during TLS connection. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`ScwFile ` + + Usage: + :: + + result = await api.get_cluster_certificate_authority( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/certificate-authority", + ) + + self._throw_on_error(res) + return unmarshal_ScwFile(res.json()) + + async def renew_cluster_certificate_authority( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + ) -> None: + """ + Renew the Kafka cluster's certificate authority. + Request to renew the certificate authority for a given Kafka cluster, specified by the `region` and `cluster_id` parameters. The certificate authority will be renewed within a few minutes. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config.
+ + Usage: + :: + + result = await api.renew_cluster_certificate_authority( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "POST", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/renew-certificate-authority", + body={}, + ) + + self._throw_on_error(res) + + async def delete_endpoint( + self, + *, + endpoint_id: str, + region: Optional[ScwRegion] = None, + ) -> None: + """ + Delete a Kafka cluster endpoint. + Delete the endpoint of a Kafka cluster. You must specify the `endpoint_id` parameter of the endpoint you want to delete. Note that you might need to update any environment configurations that point to the deleted endpoint. + :param endpoint_id: UUID of the endpoint to delete. + :param region: Region to target. If none is passed will use default region from the config. + + Usage: + :: + + result = await api.delete_endpoint( + endpoint_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_endpoint_id = validate_path_param("endpoint_id", endpoint_id) + + res = self._request( + "DELETE", + f"/kafka/v1alpha1/regions/{param_region}/endpoints/{param_endpoint_id}", + ) + + self._throw_on_error(res) + + async def create_endpoint( + self, + *, + cluster_id: str, + endpoint: EndpointSpec, + region: Optional[ScwRegion] = None, + ) -> Endpoint: + """ + Create a new Kafka cluster endpoint. + Create a new endpoint for a Kafka cluster. You can add `public_network` or `private_network` specifications to the body of the request. Note that currently only `private_network` is supported. + :param cluster_id: UUID of the Kafka Cluster. + :param endpoint: Endpoint object (`EndpointSpec`) used to expose your Kafka cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Endpoint ` + + Usage: + :: + + result = await api.create_endpoint( + cluster_id="example", + endpoint=EndpointSpec(), + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/kafka/v1alpha1/regions/{param_region}/endpoints", + body=marshal_CreateEndpointRequest( + CreateEndpointRequest( + cluster_id=cluster_id, + endpoint=endpoint, + region=region, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Endpoint(res.json()) + + async def list_users( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListUsersRequestOrderBy] = None, + name: Optional[str] = None, + ) -> ListUsersResponse: + """ + Retrieve a list of Kafka cluster users. + :param cluster_id: + :param region: Region to target. If none is passed will use default region from the config.
:param page: + :param page_size: + :param order_by: + :param name: + :return: :class:`ListUsersResponse ` + + Usage: + :: + + result = await api.list_users( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/users", + params={ + "name": name, + "order_by": order_by, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListUsersResponse(res.json()) + + async def list_users_all( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + order_by: Optional[ListUsersRequestOrderBy] = None, + name: Optional[str] = None, + ) -> list[User]: + """ + Retrieve a list of Kafka cluster users. + :param cluster_id: + :param region: Region to target. If none is passed will use default region from the config. + :param page: + :param page_size: + :param order_by: + :param name: + :return: :class:`list[User] ` + + Usage: + :: + + result = await api.list_users_all( + cluster_id="example", + ) + """ + + return await fetch_all_pages_async( + type=ListUsersResponse, + key="users", + fetcher=self.list_users, + args={ + "cluster_id": cluster_id, + "region": region, + "page": page, + "page_size": page_size, + "order_by": order_by, + "name": name, + }, + ) + + async def update_user( + self, + *, + cluster_id: str, + username: str, + region: Optional[ScwRegion] = None, + password: Optional[str] = None, + ) -> User: + """ + Update an existing user. + :param cluster_id: ID of the cluster in which to update the user's password. + :param username: Username of the Kafka cluster user. + :param region: Region to target. If none is passed will use default region from the config. + :param password: New password for the Kafka cluster user. + :return: :class:`User ` + + Usage: + :: + + result = await api.update_user( + cluster_id="example", + username="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + param_username = validate_path_param("username", username) + + res = self._request( + "PATCH", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/users/{param_username}", + body=marshal_UpdateUserRequest( + UpdateUserRequest( + cluster_id=cluster_id, + username=username, + region=region, + password=password, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_User(res.json()) diff --git a/scaleway-async/scaleway_async/kafka/v1alpha1/content.py b/scaleway-async/scaleway_async/kafka/v1alpha1/content.py new file mode 100644 index 000000000..fd574593c --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/v1alpha1/content.py @@ -0,0 +1,15 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from .types import ( + ClusterStatus, +) + +CLUSTER_TRANSIENT_STATUSES: list[ClusterStatus] = [ + ClusterStatus.CREATING, + ClusterStatus.CONFIGURING, + ClusterStatus.DELETING, +] +""" +Lists transient statuses of the enum :class:`ClusterStatus `.
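+
+Example of use as a manual polling stop condition (a minimal sketch; ``api`` is assumed to be an initialized ``KafkaV1Alpha1API`` and ``cluster_id`` an existing cluster UUID; ``wait_for_cluster`` automates this same loop):
+
+::
+
+    import asyncio
+
+    async def wait_until_stable(api, cluster_id: str):
+        # Re-fetch the cluster until it leaves a transient status.
+        cluster = await api.get_cluster(cluster_id=cluster_id)
+        while cluster.status in CLUSTER_TRANSIENT_STATUSES:
+            await asyncio.sleep(5)
+            cluster = await api.get_cluster(cluster_id=cluster_id)
+        return cluster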
+""" diff --git a/scaleway-async/scaleway_async/kafka/v1alpha1/marshalling.py b/scaleway-async/scaleway_async/kafka/v1alpha1/marshalling.py new file mode 100644 index 000000000..c69c7cfce --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/v1alpha1/marshalling.py @@ -0,0 +1,862 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import Any +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.utils import ( + OneOfPossibility, + resolve_one_of, +) +from .types import ( + ClusterStatus, + NodeTypeStock, + VolumeType, + EndpointPrivateNetworkDetails, + EndpointPublicDetails, + Endpoint, + ClusterSetting, + Volume, + Cluster, + User, + ListClustersResponse, + NodeTypeVolumeType, + NodeType, + ListNodeTypesResponse, + ListUsersResponse, + VersionAvailableSettingBooleanProperty, + VersionAvailableSettingFloatProperty, + VersionAvailableSettingIntegerProperty, + VersionAvailableSettingStringProperty, + VersionAvailableSetting, + Version, + ListVersionsResponse, + EndpointSpecPrivateNetworkDetails, + EndpointSpecPublicDetails, + CreateClusterRequestVolumeSpec, + EndpointSpec, + CreateClusterRequest, + CreateEndpointRequest, + UpdateClusterRequest, + UpdateUserRequest, +) + + +def unmarshal_EndpointPrivateNetworkDetails(data: Any) -> EndpointPrivateNetworkDetails: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'EndpointPrivateNetworkDetails' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("private_network_id", None) + if field is not None: + args["private_network_id"] = field + else: + args["private_network_id"] = None + + return EndpointPrivateNetworkDetails(**args) + + +def unmarshal_EndpointPublicDetails(data: Any) -> EndpointPublicDetails: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'EndpointPublicDetails' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + return EndpointPublicDetails(**args) + + +def unmarshal_Endpoint(data: Any) -> Endpoint: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Endpoint' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + else: + args["id"] = None + + field = data.get("dns_records", None) + if field is not None: + args["dns_records"] = field + else: + args["dns_records"] = [] + + field = data.get("port", None) + if field is not None: + args["port"] = field + else: + args["port"] = 0 + + field = data.get("private_network", None) + if field is not None: + args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None + + field = data.get("public_network", None) + if field is not None: + args["public_network"] = unmarshal_EndpointPublicDetails(field) + else: + args["public_network"] = None + + return Endpoint(**args) + + +def unmarshal_ClusterSetting(data: Any) -> ClusterSetting: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ClusterSetting' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("bool_value", None) + if field is not None: + args["bool_value"] = field + else: + args["bool_value"] = False + + field = data.get("string_value", None) + if field is not None: + args["string_value"] = field + else: + args["string_value"] = None + + field = data.get("int_value", None) + if field is not None: + args["int_value"] = field + else: + args["int_value"] = 0 + + field = data.get("float_value", None) + if field is not None: + args["float_value"] = field + else: + args["float_value"] = 0.0 + + return ClusterSetting(**args) + + +def unmarshal_Volume(data: Any) -> Volume: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Volume' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("type", None) + if field is not None: + args["type_"] = field + else: + args["type_"] = VolumeType.UNKNOWN_TYPE + + field = data.get("size_bytes", None) + if field is not None: + args["size_bytes"] = field + else: + args["size_bytes"] = 0 + + return Volume(**args) + + +def unmarshal_Cluster(data: Any) -> Cluster: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Cluster' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + else: + args["id"] = None + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + else: + args["project_id"] = None + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + else: + args["organization_id"] = None + + field = data.get("status", None) + if field is not None: + args["status"] = field + else: + args["status"] = ClusterStatus.UNKNOWN_STATUS + + field = data.get("version", None) + if field is not None: + args["version"] = field + else: + args["version"] = None + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + else: + args["tags"] = [] + + field = data.get("settings", None) + if field is not None: + args["settings"] = ( + [unmarshal_ClusterSetting(v) for v in field] if field is not None else None + ) + else: + args["settings"] = [] + + field = data.get("node_amount", None) + if field is not None: + args["node_amount"] = field + else: + args["node_amount"] = 0 + + field = data.get("node_type", None) + if field is not None: + args["node_type"] = field + else: + args["node_type"] = None + + field = data.get("endpoints", None) + if field is not None: + args["endpoints"] = ( + [unmarshal_Endpoint(v) for v in field] if field is not None else None + ) + else: + args["endpoints"] = [] + + field = data.get("region", None) + if field is not None: + args["region"] = field + else: + args["region"] = None + + field = data.get("volume", None) + if field is not None: + args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None + + return 
Cluster(**args) + + +def unmarshal_User(data: Any) -> User: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'User' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("username", None) + if field is not None: + args["username"] = field + else: + args["username"] = None + + return User(**args) + + +def unmarshal_ListClustersResponse(data: Any) -> ListClustersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListClustersResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("clusters", None) + if field is not None: + args["clusters"] = ( + [unmarshal_Cluster(v) for v in field] if field is not None else None + ) + else: + args["clusters"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListClustersResponse(**args) + + +def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NodeTypeVolumeType' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("type", None) + if field is not None: + args["type_"] = field + else: + args["type_"] = VolumeType.UNKNOWN_TYPE + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("min_size_bytes", None) + if field is not None: + args["min_size_bytes"] = field + else: + args["min_size_bytes"] = 0 + + field = data.get("max_size_bytes", None) + if field is not None: + args["max_size_bytes"] = field + else: + args["max_size_bytes"] = 0 + + field = data.get("chunk_size_bytes", None) + if field is not None: + args["chunk_size_bytes"] = field + else: + args["chunk_size_bytes"] = 0 + + return NodeTypeVolumeType(**args) + + +def unmarshal_NodeType(data: Any) -> NodeType: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NodeType' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("stock_status", None) + if field is not None: + args["stock_status"] = field + else: + args["stock_status"] = NodeTypeStock.UNKNOWN_STOCK + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("vcpus", None) + if field is not None: + args["vcpus"] = field + else: + args["vcpus"] = 0 + + field = data.get("memory_bytes", None) + if field is not None: + args["memory_bytes"] = field + else: + args["memory_bytes"] = 0 + + field = data.get("available_volume_types", None) + if field is not None: + args["available_volume_types"] = ( + [unmarshal_NodeTypeVolumeType(v) for v in field] + if field is not None + else None + ) + else: + args["available_volume_types"] = [] + + field = data.get("disabled", None) + if field is not None: + args["disabled"] = field + else: + args["disabled"] = False + + field = data.get("beta", None) + if field is not None: + args["beta"] = field + else: + args["beta"] = False + + field = data.get("cluster_range", None) + if field is not None: + args["cluster_range"] = field + else: + args["cluster_range"] = None + + return NodeType(**args) + + +def unmarshal_ListNodeTypesResponse(data: Any) -> ListNodeTypesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListNodeTypesResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("node_types", None) + if field is not None: + args["node_types"] = ( + [unmarshal_NodeType(v) for v in field] if field is not None else None + ) + else: + args["node_types"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListNodeTypesResponse(**args) + + +def unmarshal_ListUsersResponse(data: Any) -> ListUsersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListUsersResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("users", None) + if field is not None: + args["users"] = ( + [unmarshal_User(v) for v in field] if field is not None else None + ) + else: + args["users"] = None + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = None + + return ListUsersResponse(**args) + + +def unmarshal_VersionAvailableSettingBooleanProperty( + data: Any, +) -> VersionAvailableSettingBooleanProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingBooleanProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + return VersionAvailableSettingBooleanProperty(**args) + + +def unmarshal_VersionAvailableSettingFloatProperty( + data: Any, +) -> VersionAvailableSettingFloatProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingFloatProperty' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("min", None) + if field is not None: + args["min"] = field + else: + args["min"] = None + + field = data.get("max", None) + if field is not None: + args["max"] = field + else: + args["max"] = None + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + else: + args["unit"] = None + + return VersionAvailableSettingFloatProperty(**args) + + +def unmarshal_VersionAvailableSettingIntegerProperty( + data: Any, +) -> VersionAvailableSettingIntegerProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingIntegerProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("min", None) + if field is not None: + args["min"] = field + else: + args["min"] = None + + field = data.get("max", None) + if field is not None: + args["max"] = field + else: + args["max"] = None + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + else: + args["unit"] = None + + return VersionAvailableSettingIntegerProperty(**args) + + +def unmarshal_VersionAvailableSettingStringProperty( + data: Any, +) -> VersionAvailableSettingStringProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingStringProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("string_constraint", None) + if field is not None: + args["string_constraint"] = field + else: + args["string_constraint"] = None + + return VersionAvailableSettingStringProperty(**args) + + +def unmarshal_VersionAvailableSetting(data: Any) -> VersionAvailableSetting: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSetting' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("hot_configurable", None) + if field is not None: + args["hot_configurable"] = field + else: + args["hot_configurable"] = False + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("bool_property", None) + if field is not None: + args["bool_property"] = unmarshal_VersionAvailableSettingBooleanProperty(field) + else: + args["bool_property"] = None + + field = data.get("string_property", None) + if field is not None: + args["string_property"] = unmarshal_VersionAvailableSettingStringProperty(field) + else: + args["string_property"] = None + + field = data.get("int_property", None) + if field is not None: + args["int_property"] = unmarshal_VersionAvailableSettingIntegerProperty(field) + else: + args["int_property"] = None + + field = data.get("float_property", None) + if field is not None: + args["float_property"] = unmarshal_VersionAvailableSettingFloatProperty(field) + else: + args["float_property"] = None + + return VersionAvailableSetting(**args) + + +def unmarshal_Version(data: Any) -> Version: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Version' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("version", None) + if field is not None: + args["version"] = field + else: + args["version"] = None + + field = data.get("available_settings", None) + if field is not None: + args["available_settings"] = ( + [unmarshal_VersionAvailableSetting(v) for v in field] + if field is not None + else None + ) + else: + args["available_settings"] = [] + + field = data.get("end_of_life_at", None) + if field is not None: + args["end_of_life_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + else: + args["end_of_life_at"] = None + + return Version(**args) + + +def unmarshal_ListVersionsResponse(data: Any) -> ListVersionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListVersionsResponse' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("versions", None) + if field is not None: + args["versions"] = ( + [unmarshal_Version(v) for v in field] if field is not None else None + ) + else: + args["versions"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListVersionsResponse(**args) + + +def marshal_EndpointSpecPrivateNetworkDetails( + request: EndpointSpecPrivateNetworkDetails, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.private_network_id is not None: + output["private_network_id"] = request.private_network_id + + return output + + +def marshal_EndpointSpecPublicDetails( + request: EndpointSpecPublicDetails, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + return output + + +def marshal_CreateClusterRequestVolumeSpec( + request: CreateClusterRequestVolumeSpec, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.size_bytes is not None: + output["size_bytes"] = request.size_bytes + + if request.type_ is not None: + output["type"] = request.type_ + + return output + + +def marshal_EndpointSpec( + request: EndpointSpec, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility( + param="public_network", + value=request.public_network, + marshal_func=marshal_EndpointSpecPublicDetails, + ), + OneOfPossibility( + param="private_network", + value=request.private_network, + marshal_func=marshal_EndpointSpecPrivateNetworkDetails, + ), + ] + ), + ) + + return output + + +def marshal_CreateClusterRequest( + request: CreateClusterRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.version is not None: + output["version"] = request.version + + if request.node_amount is not None: + output["node_amount"] = request.node_amount + + if request.node_type is not None: + output["node_type"] = request.node_type + + if request.project_id is not None: + output["project_id"] = request.project_id + else: + output["project_id"] = defaults.default_project_id + + if request.name is not None: + output["name"] = request.name + + if request.tags is not None: + output["tags"] = request.tags + + if request.volume is not None: + output["volume"] = marshal_CreateClusterRequestVolumeSpec( + request.volume, defaults + ) + + if request.endpoints is not None: + output["endpoints"] = [ + marshal_EndpointSpec(item, defaults) for item in request.endpoints + ] + + if request.user_name is not None: + output["user_name"] = request.user_name + + if request.password is not None: + output["password"] = request.password + + return output + + +def marshal_CreateEndpointRequest( + request: CreateEndpointRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.cluster_id is not None: + output["cluster_id"] = request.cluster_id + + if request.endpoint is not None: + output["endpoint"] = marshal_EndpointSpec(request.endpoint, defaults) + + return output + + +def marshal_UpdateClusterRequest( + request: UpdateClusterRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.tags is not None: + output["tags"] = request.tags + + return output + + +def marshal_UpdateUserRequest( + request: UpdateUserRequest, + 
defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.password is not None: + output["password"] = request.password + + return output diff --git a/scaleway-async/scaleway_async/kafka/v1alpha1/types.py b/scaleway-async/scaleway_async/kafka/v1alpha1/types.py new file mode 100644 index 000000000..b5c8e0822 --- /dev/null +++ b/scaleway-async/scaleway_async/kafka/v1alpha1/types.py @@ -0,0 +1,730 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +from enum import Enum +from typing import Optional + +from scaleway_core.bridge import ( + Region as ScwRegion, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class ClusterStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + CREATING = "creating" + CONFIGURING = "configuring" + DELETING = "deleting" + ERROR = "error" + LOCKED = "locked" + STOPPED = "stopped" + + def __str__(self) -> str: + return str(self.value) + + +class ListClustersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + STATUS_ASC = "status_asc" + STATUS_DESC = "status_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListUsersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class NodeTypeStock(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STOCK = "unknown_stock" + LOW_STOCK = "low_stock" + OUT_OF_STOCK = "out_of_stock" + AVAILABLE = "available" + + def __str__(self) -> str: + return str(self.value) + + +class VolumeType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SBS_5K = "sbs_5k" + SBS_15K = "sbs_15k" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class EndpointPrivateNetworkDetails: + """ + Private Network details. + """ + + private_network_id: str + """ + UUID of the Private Network. + """ + + +@dataclass +class EndpointPublicDetails: + """ + Public Access details. + """ + + pass + + +@dataclass +class VersionAvailableSettingBooleanProperty: + default_value: bool + + +@dataclass +class VersionAvailableSettingFloatProperty: + min: float + max: float + default_value: float + unit: Optional[str] = None + + +@dataclass +class VersionAvailableSettingIntegerProperty: + min: int + max: int + default_value: int + unit: Optional[str] = None + + +@dataclass +class VersionAvailableSettingStringProperty: + default_value: str + string_constraint: Optional[str] = None + + +@dataclass +class EndpointSpecPrivateNetworkDetails: + private_network_id: str + """ + UUID of the Private Network. + """ + + +@dataclass +class EndpointSpecPublicDetails: + pass + + +@dataclass +class ClusterSetting: + name: str + """ + Name of the setting. + """ + + bool_value: Optional[bool] = False + + string_value: Optional[str] = None + + int_value: Optional[int] = 0 + + float_value: Optional[float] = 0.0 + + +@dataclass +class Endpoint: + id: str + """ + UUID of the endpoint. + """ + + dns_records: list[str] + """ + List of DNS records of the endpoint. + """ + + port: int + """ + TCP port of the endpoint. 
+ """ + + private_network: Optional[EndpointPrivateNetworkDetails] = None + + public_network: Optional[EndpointPublicDetails] = None + + +@dataclass +class Volume: + type_: VolumeType + """ + Type of volume where data is stored. + """ + + size_bytes: int + """ + Volume size. + """ + + +@dataclass +class NodeTypeVolumeType: + type_: VolumeType + """ + Volume type. + """ + + description: str + """ + The description of the volume. + """ + + min_size_bytes: int + """ + Mimimum size required for the volume. + """ + + max_size_bytes: int + """ + Maximum size required for the volume. + """ + + chunk_size_bytes: int + """ + Minimum increment level for a Block Storage volume size. + """ + + +@dataclass +class VersionAvailableSetting: + name: str + """ + Kafka cluster setting name. + """ + + hot_configurable: bool + """ + Defines whether this setting can be applied without needing a restart. + """ + + description: str + """ + Setting description. + """ + + bool_property: Optional[VersionAvailableSettingBooleanProperty] = None + + string_property: Optional[VersionAvailableSettingStringProperty] = None + + int_property: Optional[VersionAvailableSettingIntegerProperty] = None + + float_property: Optional[VersionAvailableSettingFloatProperty] = None + + +@dataclass +class CreateClusterRequestVolumeSpec: + size_bytes: int + """ + Volume size. + """ + + type_: VolumeType + """ + Type of volume where data is stored. + """ + + +@dataclass +class EndpointSpec: + public_network: Optional[EndpointSpecPublicDetails] = None + + private_network: Optional[EndpointSpecPrivateNetworkDetails] = None + + +@dataclass +class Cluster: + id: str + """ + UUID of the Kafka cluster. + """ + + name: str + """ + Name of the Kafka cluster. + """ + + project_id: str + """ + Project ID the Kafka cluster belongs to. + """ + + organization_id: str + """ + Organisation ID the Kafka cluster belongs to. + """ + + status: ClusterStatus + """ + Status of the Kafka cluster. + """ + + version: str + """ + Kafka version of the Kafka cluster. + """ + + tags: list[str] + """ + List of tags applied to the Kafka cluster. + """ + + settings: list[ClusterSetting] + """ + Advanced settings of the Kafka cluster. + """ + + node_amount: int + """ + Number of nodes in Kafka cluster. + """ + + node_type: str + """ + Node type of the Kafka cluster. + """ + + endpoints: list[Endpoint] + """ + List of Kafka cluster endpoints. + """ + + region: ScwRegion + """ + Region the Kafka cluster is in. + """ + + volume: Optional[Volume] = None + """ + Volumes of the Kafka cluster. + """ + + created_at: Optional[datetime] = None + """ + Creation date (must follow the ISO 8601 format). + """ + + updated_at: Optional[datetime] = None + """ + Last update date (must follow the ISO 8601 format). + """ + + +@dataclass +class NodeType: + name: str + """ + Node type name identifier. + """ + + stock_status: NodeTypeStock + """ + Current stock status for the node type. + """ + + description: str + """ + Current specifications of the node type offer. + """ + + vcpus: int + """ + Number of virtual CPUs of the node type. + """ + + memory_bytes: int + """ + Quantity of RAM. + """ + + available_volume_types: list[NodeTypeVolumeType] + """ + Available storage options for the node type. + """ + + disabled: bool + """ + Defines whether the node type is currently disabled. + """ + + beta: bool + """ + Defines whether the node type is currently in beta. + """ + + cluster_range: str + """ + Cluster range associated with the node type offer. 
+ """ + + +@dataclass +class User: + username: str + + +@dataclass +class Version: + version: str + """ + Kafka version. + """ + + available_settings: list[VersionAvailableSetting] + """ + Cluster configuration settings you are able to change for clusters running this version. Each item in `available_settings` describes one configurable cluster setting. + """ + + end_of_life_at: Optional[datetime] = None + """ + Date of End of Life for the version. + """ + + +@dataclass +class CreateClusterRequest: + version: str + """ + Version of Kafka. + """ + + node_amount: int + """ + Number of nodes to use for the Kafka cluster. + """ + + node_type: str + """ + Type of node to use for the Kafka cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] = None + """ + The ID of the Project in which the Kafka cluster will be created. + """ + + name: Optional[str] = None + """ + Name of the Kafka cluster. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + Tags to apply to the Kafka cluster. + """ + + volume: Optional[CreateClusterRequestVolumeSpec] = None + """ + Kafka volume information. + """ + + endpoints: Optional[list[EndpointSpec]] = field(default_factory=list) + """ + One or multiple EndpointSpec used to expose your Kafka cluster. + """ + + user_name: Optional[str] = None + """ + Username for the kafka user. + """ + + password: Optional[str] = None + """ + Password for the kafka user. + """ + + +@dataclass +class CreateEndpointRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + endpoint: EndpointSpec + """ + Endpoint object (`EndpointSpec`) used to expose your Kafka EndpointSpec. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class DeleteClusterRequest: + cluster_id: str + """ + UUID of the Kafka Cluster to delete. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class DeleteEndpointRequest: + endpoint_id: str + """ + UUID of the endpoint to delete. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class GetClusterCertificateAuthorityRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class GetClusterRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class ListClustersRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + List Kafka cluster with a given tag. + """ + + name: Optional[str] = None + """ + Lists Kafka clusters that match a name pattern. + """ + + order_by: Optional[ListClustersRequestOrderBy] = ( + ListClustersRequestOrderBy.CREATED_AT_ASC + ) + """ + Criteria to use when ordering Kafka cluster listings. + """ + + organization_id: Optional[str] = None + """ + Organization ID of the Kafka cluster. 
+ """ + + project_id: Optional[str] = None + """ + Project ID. + """ + + page: Optional[int] = 0 + page_size: Optional[int] = 0 + + +@dataclass +class ListClustersResponse: + clusters: list[Cluster] + """ + List of all Kafka cluster available in an Organization or Project. + """ + + total_count: int + """ + Total count of Kafka cluster available in an Organization or Project. + """ + + +@dataclass +class ListNodeTypesRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + include_disabled_types: Optional[bool] = False + """ + Defines whether or not to include disabled types. + """ + + page: Optional[int] = 0 + page_size: Optional[int] = 0 + + +@dataclass +class ListNodeTypesResponse: + node_types: list[NodeType] + """ + Types of the node. + """ + + total_count: int + """ + Total count of node types available. + """ + + +@dataclass +class ListUsersRequest: + cluster_id: str + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] = None + page_size: Optional[int] = None + order_by: Optional[ListUsersRequestOrderBy] = None + name: Optional[str] = None + + +@dataclass +class ListUsersResponse: + users: list[User] + total_count: int + + +@dataclass +class ListVersionsRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + version: Optional[str] = None + """ + Kafka version to filter for. + """ + + page: Optional[int] = 0 + """ + The page number to return, from the paginated results. + """ + + page_size: Optional[int] = 0 + """ + The number of items to return. + """ + + +@dataclass +class ListVersionsResponse: + versions: list[Version] + """ + Available Kafka versions. + """ + + total_count: int + """ + Total count of Kafka versions available. + """ + + +@dataclass +class RenewClusterCertificateAuthorityRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class UpdateClusterRequest: + cluster_id: str + """ + UUID of the Kafka Clusters to update. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + name: Optional[str] = None + """ + Name of the Kafka Cluster. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + Tags of a Kafka Cluster. + """ + + +@dataclass +class UpdateUserRequest: + """ + Update a user of a Kafka cluster. + """ + + cluster_id: str + """ + ID of the cluster in which to update the user's password. + """ + + username: str + """ + Username of the Kafka cluster user. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + password: Optional[str] = None + """ + New password for the Kafka cluster user. + """ diff --git a/scaleway/scaleway/kafka/__init__.py b/scaleway/scaleway/kafka/__init__.py new file mode 100644 index 000000000..8b74a5ed7 --- /dev/null +++ b/scaleway/scaleway/kafka/__init__.py @@ -0,0 +1,2 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. 
diff --git a/scaleway/scaleway/kafka/v1alpha1/__init__.py b/scaleway/scaleway/kafka/v1alpha1/__init__.py new file mode 100644 index 000000000..ae2f05903 --- /dev/null +++ b/scaleway/scaleway/kafka/v1alpha1/__init__.py @@ -0,0 +1,91 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from .types import ClusterStatus +from .content import CLUSTER_TRANSIENT_STATUSES +from .types import ListClustersRequestOrderBy +from .types import ListUsersRequestOrderBy +from .types import NodeTypeStock +from .types import VolumeType +from .types import EndpointPrivateNetworkDetails +from .types import EndpointPublicDetails +from .types import VersionAvailableSettingBooleanProperty +from .types import VersionAvailableSettingFloatProperty +from .types import VersionAvailableSettingIntegerProperty +from .types import VersionAvailableSettingStringProperty +from .types import EndpointSpecPrivateNetworkDetails +from .types import EndpointSpecPublicDetails +from .types import ClusterSetting +from .types import Endpoint +from .types import Volume +from .types import NodeTypeVolumeType +from .types import VersionAvailableSetting +from .types import CreateClusterRequestVolumeSpec +from .types import EndpointSpec +from .types import Cluster +from .types import NodeType +from .types import User +from .types import Version +from .types import CreateClusterRequest +from .types import CreateEndpointRequest +from .types import DeleteClusterRequest +from .types import DeleteEndpointRequest +from .types import GetClusterCertificateAuthorityRequest +from .types import GetClusterRequest +from .types import ListClustersRequest +from .types import ListClustersResponse +from .types import ListNodeTypesRequest +from .types import ListNodeTypesResponse +from .types import ListUsersRequest +from .types import ListUsersResponse +from .types import ListVersionsRequest +from .types import ListVersionsResponse +from .types import RenewClusterCertificateAuthorityRequest +from .types import UpdateClusterRequest +from .types import UpdateUserRequest +from .api import KafkaV1Alpha1API + +__all__ = [ + "ClusterStatus", + "CLUSTER_TRANSIENT_STATUSES", + "ListClustersRequestOrderBy", + "ListUsersRequestOrderBy", + "NodeTypeStock", + "VolumeType", + "EndpointPrivateNetworkDetails", + "EndpointPublicDetails", + "VersionAvailableSettingBooleanProperty", + "VersionAvailableSettingFloatProperty", + "VersionAvailableSettingIntegerProperty", + "VersionAvailableSettingStringProperty", + "EndpointSpecPrivateNetworkDetails", + "EndpointSpecPublicDetails", + "ClusterSetting", + "Endpoint", + "Volume", + "NodeTypeVolumeType", + "VersionAvailableSetting", + "CreateClusterRequestVolumeSpec", + "EndpointSpec", + "Cluster", + "NodeType", + "User", + "Version", + "CreateClusterRequest", + "CreateEndpointRequest", + "DeleteClusterRequest", + "DeleteEndpointRequest", + "GetClusterCertificateAuthorityRequest", + "GetClusterRequest", + "ListClustersRequest", + "ListClustersResponse", + "ListNodeTypesRequest", + "ListNodeTypesResponse", + "ListUsersRequest", + "ListUsersResponse", + "ListVersionsRequest", + "ListVersionsResponse", + "RenewClusterCertificateAuthorityRequest", + "UpdateClusterRequest", + "UpdateUserRequest", + "KafkaV1Alpha1API", +] diff --git a/scaleway/scaleway/kafka/v1alpha1/api.py b/scaleway/scaleway/kafka/v1alpha1/api.py new file mode 100644 index 000000000..e88b28793 --- /dev/null +++ b/scaleway/scaleway/kafka/v1alpha1/api.py @@ -0,0 +1,808 @@ +# This file was 
automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import Optional + +from scaleway_core.api import API +from scaleway_core.bridge import ( + Region as ScwRegion, + ScwFile, + unmarshal_ScwFile, +) +from scaleway_core.utils import ( + WaitForOptions, + random_name, + validate_path_param, + fetch_all_pages, + wait_for_resource, +) +from .types import ( + ListClustersRequestOrderBy, + ListUsersRequestOrderBy, + Cluster, + CreateClusterRequest, + CreateClusterRequestVolumeSpec, + CreateEndpointRequest, + Endpoint, + EndpointSpec, + ListClustersResponse, + ListNodeTypesResponse, + ListUsersResponse, + ListVersionsResponse, + NodeType, + UpdateClusterRequest, + UpdateUserRequest, + User, + Version, +) +from .content import ( + CLUSTER_TRANSIENT_STATUSES, +) +from .marshalling import ( + unmarshal_Endpoint, + unmarshal_Cluster, + unmarshal_User, + unmarshal_ListClustersResponse, + unmarshal_ListNodeTypesResponse, + unmarshal_ListUsersResponse, + unmarshal_ListVersionsResponse, + marshal_CreateClusterRequest, + marshal_CreateEndpointRequest, + marshal_UpdateClusterRequest, + marshal_UpdateUserRequest, +) + + +class KafkaV1Alpha1API(API): + """ + This API allows you to manage your Clusters for Apache Kafka®. This product is currently in Private Beta. + """ + + def list_node_types( + self, + *, + region: Optional[ScwRegion] = None, + include_disabled_types: Optional[bool] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListNodeTypesResponse: + """ + List available node types. + :param region: Region to target. If none is passed will use default region from the config. + :param include_disabled_types: Defines whether or not to include disabled types. + :param page: + :param page_size: + :return: :class:`ListNodeTypesResponse ` + + Usage: + :: + + result = api.list_node_types() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/node-types", + params={ + "include_disabled_types": include_disabled_types, + "page": page, + "page_size": page_size or self.client.default_page_size, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListNodeTypesResponse(res.json()) + + def list_node_types_all( + self, + *, + region: Optional[ScwRegion] = None, + include_disabled_types: Optional[bool] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[NodeType]: + """ + List available node types. + :param region: Region to target. If none is passed will use default region from the config. + :param include_disabled_types: Defines whether or not to include disabled types. + :param page: + :param page_size: + :return: :class:`list[NodeType] ` + + Usage: + :: + + result = api.list_node_types_all() + """ + + return fetch_all_pages( + type=ListNodeTypesResponse, + key="node_types", + fetcher=self.list_node_types, + args={ + "region": region, + "include_disabled_types": include_disabled_types, + "page": page, + "page_size": page_size, + }, + ) + + def list_versions( + self, + *, + region: Optional[ScwRegion] = None, + version: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListVersionsResponse: + """ + List Kafka versions. + List all available versions of Kafka at the current time. + :param region: Region to target. If none is passed will use default region from the config. 
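A sketch contrasting manual pagination with the `_all` helper, reusing the `kafka_api` instance assumed in the earlier sketch; page numbering and defaults follow the generated signatures::

    # Single page with an explicit size; total_count reports the overall item count.
    resp = kafka_api.list_node_types(page=1, page_size=20)
    print(resp.total_count)

    # Or let the fetch_all_pages-based helper collect every page for you.
    all_types = kafka_api.list_node_types_all(include_disabled_types=False)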
+ :param version: Kafka version to filter for. + :param page: The page number to return, from the paginated results. + :param page_size: The number of items to return. + :return: :class:`ListVersionsResponse ` + + Usage: + :: + + result = api.list_versions() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/versions", + params={ + "page": page, + "page_size": page_size or self.client.default_page_size, + "version": version, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListVersionsResponse(res.json()) + + def list_versions_all( + self, + *, + region: Optional[ScwRegion] = None, + version: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[Version]: + """ + List Kafka versions. + List all available versions of Kafka at the current time. + :param region: Region to target. If none is passed will use default region from the config. + :param version: Kafka version to filter for. + :param page: The page number to return, from the paginated results. + :param page_size: The number of items to return. + :return: :class:`list[Version] ` + + Usage: + :: + + result = api.list_versions_all() + """ + + return fetch_all_pages( + type=ListVersionsResponse, + key="versions", + fetcher=self.list_versions, + args={ + "region": region, + "version": version, + "page": page, + "page_size": page_size, + }, + ) + + def list_clusters( + self, + *, + region: Optional[ScwRegion] = None, + tags: Optional[list[str]] = None, + name: Optional[str] = None, + order_by: Optional[ListClustersRequestOrderBy] = None, + organization_id: Optional[str] = None, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> ListClustersResponse: + """ + List Kafka clusters. + List all Kafka clusters in the specified region. By default, the clusters returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. You can define additional parameters for your query, such as `tags` and `name`. For the `name` parameter, the value you include will be checked against the whole name string to see if it includes the string you put in the parameter. + :param region: Region to target. If none is passed will use default region from the config. + :param tags: List Kafka cluster with a given tag. + :param name: Lists Kafka clusters that match a name pattern. + :param order_by: Criteria to use when ordering Kafka cluster listings. + :param organization_id: Organization ID of the Kafka cluster. + :param project_id: Project ID. 
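For instance, the version helpers above can be used to inspect End of Life dates — a sketch reusing the assumed `kafka_api` instance::

    for v in kafka_api.list_versions_all():
        print(v.version, v.end_of_life_at)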
+ :param page: + :param page_size: + :return: :class:`ListClustersResponse ` + + Usage: + :: + + result = api.list_clusters() + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters", + params={ + "name": name, + "order_by": order_by, + "organization_id": organization_id + or self.client.default_organization_id, + "page": page, + "page_size": page_size or self.client.default_page_size, + "project_id": project_id or self.client.default_project_id, + "tags": tags, + }, + ) + + self._throw_on_error(res) + return unmarshal_ListClustersResponse(res.json()) + + def list_clusters_all( + self, + *, + region: Optional[ScwRegion] = None, + tags: Optional[list[str]] = None, + name: Optional[str] = None, + order_by: Optional[ListClustersRequestOrderBy] = None, + organization_id: Optional[str] = None, + project_id: Optional[str] = None, + page: Optional[int] = None, + page_size: Optional[int] = None, + ) -> list[Cluster]: + """ + List Kafka clusters. + List all Kafka clusters in the specified region. By default, the clusters returned in the list are ordered by creation date in ascending order, though this can be modified via the order_by field. You can define additional parameters for your query, such as `tags` and `name`. For the `name` parameter, the value you include will be checked against the whole name string to see if it includes the string you put in the parameter. + :param region: Region to target. If none is passed will use default region from the config. + :param tags: List Kafka cluster with a given tag. + :param name: Lists Kafka clusters that match a name pattern. + :param order_by: Criteria to use when ordering Kafka cluster listings. + :param organization_id: Organization ID of the Kafka cluster. + :param project_id: Project ID. + :param page: + :param page_size: + :return: :class:`list[Cluster] ` + + Usage: + :: + + result = api.list_clusters_all() + """ + + return fetch_all_pages( + type=ListClustersResponse, + key="clusters", + fetcher=self.list_clusters, + args={ + "region": region, + "tags": tags, + "name": name, + "order_by": order_by, + "organization_id": organization_id, + "project_id": project_id, + "page": page, + "page_size": page_size, + }, + ) + + def get_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + ) -> Cluster: + """ + Get a Kafka cluster. + Retrieve information about a given Kafka cluster, specified by the `region` and `cluster_id` parameters. Its full details, including name, status, IP address and port, are returned in the response object. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Cluster ` + + Usage: + :: + + result = api.get_cluster( + cluster_id="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + param_cluster_id = validate_path_param("cluster_id", cluster_id) + + res = self._request( + "GET", + f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}", + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + def wait_for_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + options: Optional[WaitForOptions[Cluster, bool]] = None, + ) -> Cluster: + """ + Get a Kafka cluster. 
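A sketch of filtered listing followed by a direct fetch, using the assumed `kafka_api` instance; the tag value is a placeholder::

    from scaleway.kafka.v1alpha1 import ListClustersRequestOrderBy

    clusters = kafka_api.list_clusters_all(
        tags=["production"],  # placeholder tag
        order_by=ListClustersRequestOrderBy.NAME_ASC,
    )
    if clusters:
        cluster = kafka_api.get_cluster(cluster_id=clusters[0].id)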
+ Retrieve information about a given Kafka cluster, specified by the `region` and `cluster_id` parameters. Its full details, including name, status, IP address and port, are returned in the response object. + :param cluster_id: UUID of the Kafka Cluster. + :param region: Region to target. If none is passed will use default region from the config. + :return: :class:`Cluster ` + + Usage: + :: + + result = api.get_cluster( + cluster_id="example", + ) + """ + + if not options: + options = WaitForOptions() + + if not options.stop: + options.stop = lambda res: res.status not in CLUSTER_TRANSIENT_STATUSES + + return wait_for_resource( + fetcher=self.get_cluster, + options=options, + args={ + "cluster_id": cluster_id, + "region": region, + }, + ) + + def create_cluster( + self, + *, + version: str, + node_amount: int, + node_type: str, + region: Optional[ScwRegion] = None, + project_id: Optional[str] = None, + name: Optional[str] = None, + tags: Optional[list[str]] = None, + volume: Optional[CreateClusterRequestVolumeSpec] = None, + endpoints: Optional[list[EndpointSpec]] = None, + user_name: Optional[str] = None, + password: Optional[str] = None, + ) -> Cluster: + """ + Create a Kafka cluster. + Create a new Kafka cluster. + :param version: Version of Kafka. + :param node_amount: Number of nodes to use for the Kafka cluster. + :param node_type: Type of node to use for the Kafka cluster. + :param region: Region to target. If none is passed will use default region from the config. + :param project_id: The ID of the Project in which the Kafka cluster will be created. + :param name: Name of the Kafka cluster. + :param tags: Tags to apply to the Kafka cluster. + :param volume: Kafka volume information. + :param endpoints: One or multiple EndpointSpec used to expose your Kafka cluster. + :param user_name: Username for the kafka user. + :param password: Password for the kafka user. + :return: :class:`Cluster ` + + Usage: + :: + + result = api.create_cluster( + version="example", + node_amount=1, + node_type="example", + ) + """ + + param_region = validate_path_param( + "region", region or self.client.default_region + ) + + res = self._request( + "POST", + f"/kafka/v1alpha1/regions/{param_region}/clusters", + body=marshal_CreateClusterRequest( + CreateClusterRequest( + version=version, + node_amount=node_amount, + node_type=node_type, + region=region, + project_id=project_id, + name=name or random_name(prefix="kafk"), + tags=tags, + volume=volume, + endpoints=endpoints, + user_name=user_name, + password=password, + ), + self.client, + ), + ) + + self._throw_on_error(res) + return unmarshal_Cluster(res.json()) + + def update_cluster( + self, + *, + cluster_id: str, + region: Optional[ScwRegion] = None, + name: Optional[str] = None, + tags: Optional[list[str]] = None, + ) -> Cluster: + """ + Update a Kafka cluster. + Update the parameters of a Kafka cluster. + :param cluster_id: UUID of the Kafka Clusters to update. + :param region: Region to target. If none is passed will use default region from the config. + :param name: Name of the Kafka Cluster. + :param tags: Tags of a Kafka Cluster. 
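A create-then-wait sketch combining `create_cluster` and `wait_for_cluster`, reusing the assumed `kafka_api` instance; the version string, node type name and credentials are placeholders, not values confirmed by this diff::

    from scaleway.kafka.v1alpha1 import (
        CLUSTER_TRANSIENT_STATUSES,
        CreateClusterRequestVolumeSpec,
        VolumeType,
    )

    cluster = kafka_api.create_cluster(
        version="3.8",        # placeholder version string
        node_amount=3,
        node_type="example",  # placeholder node type name
        volume=CreateClusterRequestVolumeSpec(
            size_bytes=10_000_000_000,
            type_=VolumeType.SBS_5K,
        ),
        user_name="admin",            # placeholder credentials
        password="example-password",
    )
    # wait_for_cluster polls until the cluster leaves CLUSTER_TRANSIENT_STATUSES
    # (its default stop condition), e.g. once it reaches READY or ERROR.
    cluster = kafka_api.wait_for_cluster(cluster_id=cluster.id)
    assert cluster.status not in CLUSTER_TRANSIENT_STATUSES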
+        :return: :class:`Cluster `
+
+        Usage:
+        ::
+
+            result = api.update_cluster(
+                cluster_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+
+        res = self._request(
+            "PATCH",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}",
+            body=marshal_UpdateClusterRequest(
+                UpdateClusterRequest(
+                    cluster_id=cluster_id,
+                    region=region,
+                    name=name,
+                    tags=tags,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_Cluster(res.json())
+
+    def delete_cluster(
+        self,
+        *,
+        cluster_id: str,
+        region: Optional[ScwRegion] = None,
+    ) -> Cluster:
+        """
+        Delete a Kafka cluster.
+        Delete a given Kafka cluster, specified by the `region` and `cluster_id` parameters. Deleting a Kafka cluster is permanent and cannot be undone. Note that upon deletion all your data will be lost.
+        :param cluster_id: UUID of the Kafka Cluster to delete.
+        :param region: Region to target. If none is passed will use default region from the config.
+        :return: :class:`Cluster `
+
+        Usage:
+        ::
+
+            result = api.delete_cluster(
+                cluster_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+
+        res = self._request(
+            "DELETE",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}",
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_Cluster(res.json())
+
+    def get_cluster_certificate_authority(
+        self,
+        *,
+        cluster_id: str,
+        region: Optional[ScwRegion] = None,
+    ) -> ScwFile:
+        """
+        Get a Kafka cluster's certificate authority.
+        Retrieve the certificate authority for a given Kafka cluster, specified by the `region` and `cluster_id` parameters. The response object contains the certificate in PEM format. The certificate is required to validate the server from the client side during a TLS connection.
+        :param cluster_id: UUID of the Kafka Cluster.
+        :param region: Region to target. If none is passed will use default region from the config.
+        :return: :class:`ScwFile `
+
+        Usage:
+        ::
+
+            result = api.get_cluster_certificate_authority(
+                cluster_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+
+        res = self._request(
+            "GET",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/certificate-authority",
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_ScwFile(res.json())
+
+    def renew_cluster_certificate_authority(
+        self,
+        *,
+        cluster_id: str,
+        region: Optional[ScwRegion] = None,
+    ) -> None:
+        """
+        Renew the Kafka cluster's certificate authority.
+        Request to renew the certificate authority for a given Kafka cluster, specified by the `region` and `cluster_id` parameters. The certificate authority will be renewed within a few minutes.
+        :param cluster_id: UUID of the Kafka Cluster.
+        :param region: Region to target. If none is passed will use default region from the config.
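A sketch of downloading the CA certificate for client-side TLS validation, reusing the `cluster` from the earlier sketch. The assumption that `ScwFile.content` carries the payload base64-encoded follows the pattern of other Scaleway APIs returning `ScwFile`; verify against `scaleway_core.bridge` before relying on it::

    import base64

    ca = kafka_api.get_cluster_certificate_authority(cluster_id=cluster.id)
    # Assumption: ScwFile.content is the PEM certificate, base64-encoded.
    with open("kafka-ca.pem", "wb") as f:
        f.write(base64.b64decode(ca.content))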
+
+        Usage:
+        ::
+
+            result = api.renew_cluster_certificate_authority(
+                cluster_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+
+        res = self._request(
+            "POST",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/renew-certificate-authority",
+            body={},
+        )
+
+        self._throw_on_error(res)
+
+    def delete_endpoint(
+        self,
+        *,
+        endpoint_id: str,
+        region: Optional[ScwRegion] = None,
+    ) -> None:
+        """
+        Delete a Kafka cluster endpoint.
+        Delete the endpoint of a Kafka cluster. You must specify the `endpoint_id` parameter of the endpoint you want to delete. Note that you might need to update any environment configurations that point to the deleted endpoint.
+        :param endpoint_id: UUID of the endpoint to delete.
+        :param region: Region to target. If none is passed will use default region from the config.
+
+        Usage:
+        ::
+
+            result = api.delete_endpoint(
+                endpoint_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_endpoint_id = validate_path_param("endpoint_id", endpoint_id)
+
+        res = self._request(
+            "DELETE",
+            f"/kafka/v1alpha1/regions/{param_region}/endpoints/{param_endpoint_id}",
+        )
+
+        self._throw_on_error(res)
+
+    def create_endpoint(
+        self,
+        *,
+        cluster_id: str,
+        endpoint: EndpointSpec,
+        region: Optional[ScwRegion] = None,
+    ) -> Endpoint:
+        """
+        Create a new Kafka cluster endpoint.
+        Create a new endpoint for a Kafka cluster. You can add `public_network` or `private_network` specifications to the body of the request. Note that currently only `private_network` is supported.
+        :param cluster_id: UUID of the Kafka Cluster.
+        :param endpoint: Endpoint specification (`EndpointSpec`) used to expose your Kafka cluster.
+        :param region: Region to target. If none is passed will use default region from the config.
+        :return: :class:`Endpoint `
+
+        Usage:
+        ::
+
+            result = api.create_endpoint(
+                cluster_id="example",
+                endpoint=EndpointSpec(),
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+
+        res = self._request(
+            "POST",
+            f"/kafka/v1alpha1/regions/{param_region}/endpoints",
+            body=marshal_CreateEndpointRequest(
+                CreateEndpointRequest(
+                    cluster_id=cluster_id,
+                    endpoint=endpoint,
+                    region=region,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_Endpoint(res.json())
+
+    def list_users(
+        self,
+        *,
+        cluster_id: str,
+        region: Optional[ScwRegion] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListUsersRequestOrderBy] = None,
+        name: Optional[str] = None,
+    ) -> ListUsersResponse:
+        """
+        Retrieve a list of users of a Kafka cluster.
+        :param cluster_id:
+        :param region: Region to target. If none is passed will use default region from the config.
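A sketch of attaching a Private Network endpoint, reusing the assumed `kafka_api` and `cluster`; the Private Network UUID is a placeholder. Per the docstring above, only `private_network` is currently supported::

    from scaleway.kafka.v1alpha1 import (
        EndpointSpec,
        EndpointSpecPrivateNetworkDetails,
    )

    endpoint = kafka_api.create_endpoint(
        cluster_id=cluster.id,
        endpoint=EndpointSpec(
            private_network=EndpointSpecPrivateNetworkDetails(
                private_network_id="11111111-1111-1111-1111-111111111111",  # placeholder UUID
            ),
        ),
    )
    print(endpoint.dns_records, endpoint.port)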
+        :param page:
+        :param page_size:
+        :param order_by:
+        :param name:
+        :return: :class:`ListUsersResponse `
+
+        Usage:
+        ::
+
+            result = api.list_users(
+                cluster_id="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+
+        res = self._request(
+            "GET",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/users",
+            params={
+                "name": name,
+                "order_by": order_by,
+                "page": page,
+                "page_size": page_size or self.client.default_page_size,
+            },
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_ListUsersResponse(res.json())
+
+    def list_users_all(
+        self,
+        *,
+        cluster_id: str,
+        region: Optional[ScwRegion] = None,
+        page: Optional[int] = None,
+        page_size: Optional[int] = None,
+        order_by: Optional[ListUsersRequestOrderBy] = None,
+        name: Optional[str] = None,
+    ) -> list[User]:
+        """
+        Retrieve a list of users of a Kafka cluster.
+        :param cluster_id:
+        :param region: Region to target. If none is passed will use default region from the config.
+        :param page:
+        :param page_size:
+        :param order_by:
+        :param name:
+        :return: :class:`list[User] `
+
+        Usage:
+        ::
+
+            result = api.list_users_all(
+                cluster_id="example",
+            )
+        """
+
+        return fetch_all_pages(
+            type=ListUsersResponse,
+            key="users",
+            fetcher=self.list_users,
+            args={
+                "cluster_id": cluster_id,
+                "region": region,
+                "page": page,
+                "page_size": page_size,
+                "order_by": order_by,
+                "name": name,
+            },
+        )
+
+    def update_user(
+        self,
+        *,
+        cluster_id: str,
+        username: str,
+        region: Optional[ScwRegion] = None,
+        password: Optional[str] = None,
+    ) -> User:
+        """
+        Update an existing user.
+        :param cluster_id: ID of the cluster in which to update the user's password.
+        :param username: Username of the Kafka cluster user.
+        :param region: Region to target. If none is passed will use default region from the config.
+        :param password: New password for the Kafka cluster user.
+        :return: :class:`User `
+
+        Usage:
+        ::
+
+            result = api.update_user(
+                cluster_id="example",
+                username="example",
+            )
+        """
+
+        param_region = validate_path_param(
+            "region", region or self.client.default_region
+        )
+        param_cluster_id = validate_path_param("cluster_id", cluster_id)
+        param_username = validate_path_param("username", username)
+
+        res = self._request(
+            "PATCH",
+            f"/kafka/v1alpha1/regions/{param_region}/clusters/{param_cluster_id}/users/{param_username}",
+            body=marshal_UpdateUserRequest(
+                UpdateUserRequest(
+                    cluster_id=cluster_id,
+                    username=username,
+                    region=region,
+                    password=password,
+                ),
+                self.client,
+            ),
+        )
+
+        self._throw_on_error(res)
+        return unmarshal_User(res.json())
diff --git a/scaleway/scaleway/kafka/v1alpha1/content.py b/scaleway/scaleway/kafka/v1alpha1/content.py
new file mode 100644
index 000000000..fd574593c
--- /dev/null
+++ b/scaleway/scaleway/kafka/v1alpha1/content.py
@@ -0,0 +1,15 @@
+# This file was automatically generated. DO NOT EDIT.
+# If you have any remark or suggestion do not hesitate to open an issue.
+
+from .types import (
+    ClusterStatus,
+)
+
+CLUSTER_TRANSIENT_STATUSES: list[ClusterStatus] = [
+    ClusterStatus.CREATING,
+    ClusterStatus.CONFIGURING,
+    ClusterStatus.DELETING,
+]
+"""
+Lists transient statuses of the enum :class:`ClusterStatus `.
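A password-rotation sketch built on `update_user`, reusing the assumed `kafka_api` and `cluster`; username and password are placeholders::

    user = kafka_api.update_user(
        cluster_id=cluster.id,
        username="admin",                  # placeholder user
        password="new-example-password",   # placeholder password
    )
    print(user.username)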
+""" diff --git a/scaleway/scaleway/kafka/v1alpha1/marshalling.py b/scaleway/scaleway/kafka/v1alpha1/marshalling.py new file mode 100644 index 000000000..c69c7cfce --- /dev/null +++ b/scaleway/scaleway/kafka/v1alpha1/marshalling.py @@ -0,0 +1,862 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. + +from typing import Any +from dateutil import parser + +from scaleway_core.profile import ProfileDefaults +from scaleway_core.utils import ( + OneOfPossibility, + resolve_one_of, +) +from .types import ( + ClusterStatus, + NodeTypeStock, + VolumeType, + EndpointPrivateNetworkDetails, + EndpointPublicDetails, + Endpoint, + ClusterSetting, + Volume, + Cluster, + User, + ListClustersResponse, + NodeTypeVolumeType, + NodeType, + ListNodeTypesResponse, + ListUsersResponse, + VersionAvailableSettingBooleanProperty, + VersionAvailableSettingFloatProperty, + VersionAvailableSettingIntegerProperty, + VersionAvailableSettingStringProperty, + VersionAvailableSetting, + Version, + ListVersionsResponse, + EndpointSpecPrivateNetworkDetails, + EndpointSpecPublicDetails, + CreateClusterRequestVolumeSpec, + EndpointSpec, + CreateClusterRequest, + CreateEndpointRequest, + UpdateClusterRequest, + UpdateUserRequest, +) + + +def unmarshal_EndpointPrivateNetworkDetails(data: Any) -> EndpointPrivateNetworkDetails: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'EndpointPrivateNetworkDetails' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("private_network_id", None) + if field is not None: + args["private_network_id"] = field + else: + args["private_network_id"] = None + + return EndpointPrivateNetworkDetails(**args) + + +def unmarshal_EndpointPublicDetails(data: Any) -> EndpointPublicDetails: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'EndpointPublicDetails' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + return EndpointPublicDetails(**args) + + +def unmarshal_Endpoint(data: Any) -> Endpoint: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Endpoint' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + else: + args["id"] = None + + field = data.get("dns_records", None) + if field is not None: + args["dns_records"] = field + else: + args["dns_records"] = [] + + field = data.get("port", None) + if field is not None: + args["port"] = field + else: + args["port"] = 0 + + field = data.get("private_network", None) + if field is not None: + args["private_network"] = unmarshal_EndpointPrivateNetworkDetails(field) + else: + args["private_network"] = None + + field = data.get("public_network", None) + if field is not None: + args["public_network"] = unmarshal_EndpointPublicDetails(field) + else: + args["public_network"] = None + + return Endpoint(**args) + + +def unmarshal_ClusterSetting(data: Any) -> ClusterSetting: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ClusterSetting' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("bool_value", None) + if field is not None: + args["bool_value"] = field + else: + args["bool_value"] = False + + field = data.get("string_value", None) + if field is not None: + args["string_value"] = field + else: + args["string_value"] = None + + field = data.get("int_value", None) + if field is not None: + args["int_value"] = field + else: + args["int_value"] = 0 + + field = data.get("float_value", None) + if field is not None: + args["float_value"] = field + else: + args["float_value"] = 0.0 + + return ClusterSetting(**args) + + +def unmarshal_Volume(data: Any) -> Volume: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Volume' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("type", None) + if field is not None: + args["type_"] = field + else: + args["type_"] = VolumeType.UNKNOWN_TYPE + + field = data.get("size_bytes", None) + if field is not None: + args["size_bytes"] = field + else: + args["size_bytes"] = 0 + + return Volume(**args) + + +def unmarshal_Cluster(data: Any) -> Cluster: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Cluster' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("id", None) + if field is not None: + args["id"] = field + else: + args["id"] = None + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("project_id", None) + if field is not None: + args["project_id"] = field + else: + args["project_id"] = None + + field = data.get("organization_id", None) + if field is not None: + args["organization_id"] = field + else: + args["organization_id"] = None + + field = data.get("status", None) + if field is not None: + args["status"] = field + else: + args["status"] = ClusterStatus.UNKNOWN_STATUS + + field = data.get("version", None) + if field is not None: + args["version"] = field + else: + args["version"] = None + + field = data.get("tags", None) + if field is not None: + args["tags"] = field + else: + args["tags"] = [] + + field = data.get("settings", None) + if field is not None: + args["settings"] = ( + [unmarshal_ClusterSetting(v) for v in field] if field is not None else None + ) + else: + args["settings"] = [] + + field = data.get("node_amount", None) + if field is not None: + args["node_amount"] = field + else: + args["node_amount"] = 0 + + field = data.get("node_type", None) + if field is not None: + args["node_type"] = field + else: + args["node_type"] = None + + field = data.get("endpoints", None) + if field is not None: + args["endpoints"] = ( + [unmarshal_Endpoint(v) for v in field] if field is not None else None + ) + else: + args["endpoints"] = [] + + field = data.get("region", None) + if field is not None: + args["region"] = field + else: + args["region"] = None + + field = data.get("volume", None) + if field is not None: + args["volume"] = unmarshal_Volume(field) + else: + args["volume"] = None + + field = data.get("created_at", None) + if field is not None: + args["created_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["created_at"] = None + + field = data.get("updated_at", None) + if field is not None: + args["updated_at"] = parser.isoparse(field) if isinstance(field, str) else field + else: + args["updated_at"] = None + + return 
Cluster(**args) + + +def unmarshal_User(data: Any) -> User: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'User' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("username", None) + if field is not None: + args["username"] = field + else: + args["username"] = None + + return User(**args) + + +def unmarshal_ListClustersResponse(data: Any) -> ListClustersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListClustersResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("clusters", None) + if field is not None: + args["clusters"] = ( + [unmarshal_Cluster(v) for v in field] if field is not None else None + ) + else: + args["clusters"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListClustersResponse(**args) + + +def unmarshal_NodeTypeVolumeType(data: Any) -> NodeTypeVolumeType: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NodeTypeVolumeType' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("type", None) + if field is not None: + args["type_"] = field + else: + args["type_"] = VolumeType.UNKNOWN_TYPE + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("min_size_bytes", None) + if field is not None: + args["min_size_bytes"] = field + else: + args["min_size_bytes"] = 0 + + field = data.get("max_size_bytes", None) + if field is not None: + args["max_size_bytes"] = field + else: + args["max_size_bytes"] = 0 + + field = data.get("chunk_size_bytes", None) + if field is not None: + args["chunk_size_bytes"] = field + else: + args["chunk_size_bytes"] = 0 + + return NodeTypeVolumeType(**args) + + +def unmarshal_NodeType(data: Any) -> NodeType: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'NodeType' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("stock_status", None) + if field is not None: + args["stock_status"] = field + else: + args["stock_status"] = NodeTypeStock.UNKNOWN_STOCK + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("vcpus", None) + if field is not None: + args["vcpus"] = field + else: + args["vcpus"] = 0 + + field = data.get("memory_bytes", None) + if field is not None: + args["memory_bytes"] = field + else: + args["memory_bytes"] = 0 + + field = data.get("available_volume_types", None) + if field is not None: + args["available_volume_types"] = ( + [unmarshal_NodeTypeVolumeType(v) for v in field] + if field is not None + else None + ) + else: + args["available_volume_types"] = [] + + field = data.get("disabled", None) + if field is not None: + args["disabled"] = field + else: + args["disabled"] = False + + field = data.get("beta", None) + if field is not None: + args["beta"] = field + else: + args["beta"] = False + + field = data.get("cluster_range", None) + if field is not None: + args["cluster_range"] = field + else: + args["cluster_range"] = None + + return NodeType(**args) + + +def unmarshal_ListNodeTypesResponse(data: Any) -> ListNodeTypesResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListNodeTypesResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("node_types", None) + if field is not None: + args["node_types"] = ( + [unmarshal_NodeType(v) for v in field] if field is not None else None + ) + else: + args["node_types"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListNodeTypesResponse(**args) + + +def unmarshal_ListUsersResponse(data: Any) -> ListUsersResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListUsersResponse' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("users", None) + if field is not None: + args["users"] = ( + [unmarshal_User(v) for v in field] if field is not None else None + ) + else: + args["users"] = None + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = None + + return ListUsersResponse(**args) + + +def unmarshal_VersionAvailableSettingBooleanProperty( + data: Any, +) -> VersionAvailableSettingBooleanProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingBooleanProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + return VersionAvailableSettingBooleanProperty(**args) + + +def unmarshal_VersionAvailableSettingFloatProperty( + data: Any, +) -> VersionAvailableSettingFloatProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingFloatProperty' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("min", None) + if field is not None: + args["min"] = field + else: + args["min"] = None + + field = data.get("max", None) + if field is not None: + args["max"] = field + else: + args["max"] = None + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + else: + args["unit"] = None + + return VersionAvailableSettingFloatProperty(**args) + + +def unmarshal_VersionAvailableSettingIntegerProperty( + data: Any, +) -> VersionAvailableSettingIntegerProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingIntegerProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("min", None) + if field is not None: + args["min"] = field + else: + args["min"] = None + + field = data.get("max", None) + if field is not None: + args["max"] = field + else: + args["max"] = None + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("unit", None) + if field is not None: + args["unit"] = field + else: + args["unit"] = None + + return VersionAvailableSettingIntegerProperty(**args) + + +def unmarshal_VersionAvailableSettingStringProperty( + data: Any, +) -> VersionAvailableSettingStringProperty: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSettingStringProperty' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("default_value", None) + if field is not None: + args["default_value"] = field + else: + args["default_value"] = None + + field = data.get("string_constraint", None) + if field is not None: + args["string_constraint"] = field + else: + args["string_constraint"] = None + + return VersionAvailableSettingStringProperty(**args) + + +def unmarshal_VersionAvailableSetting(data: Any) -> VersionAvailableSetting: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'VersionAvailableSetting' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("name", None) + if field is not None: + args["name"] = field + else: + args["name"] = None + + field = data.get("hot_configurable", None) + if field is not None: + args["hot_configurable"] = field + else: + args["hot_configurable"] = False + + field = data.get("description", None) + if field is not None: + args["description"] = field + else: + args["description"] = None + + field = data.get("bool_property", None) + if field is not None: + args["bool_property"] = unmarshal_VersionAvailableSettingBooleanProperty(field) + else: + args["bool_property"] = None + + field = data.get("string_property", None) + if field is not None: + args["string_property"] = unmarshal_VersionAvailableSettingStringProperty(field) + else: + args["string_property"] = None + + field = data.get("int_property", None) + if field is not None: + args["int_property"] = unmarshal_VersionAvailableSettingIntegerProperty(field) + else: + args["int_property"] = None + + field = data.get("float_property", None) + if field is not None: + args["float_property"] = unmarshal_VersionAvailableSettingFloatProperty(field) + else: + args["float_property"] = None + + return VersionAvailableSetting(**args) + + +def unmarshal_Version(data: Any) -> Version: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'Version' failed as data isn't a dictionary." + ) + + args: dict[str, Any] = {} + + field = data.get("version", None) + if field is not None: + args["version"] = field + else: + args["version"] = None + + field = data.get("available_settings", None) + if field is not None: + args["available_settings"] = ( + [unmarshal_VersionAvailableSetting(v) for v in field] + if field is not None + else None + ) + else: + args["available_settings"] = [] + + field = data.get("end_of_life_at", None) + if field is not None: + args["end_of_life_at"] = ( + parser.isoparse(field) if isinstance(field, str) else field + ) + else: + args["end_of_life_at"] = None + + return Version(**args) + + +def unmarshal_ListVersionsResponse(data: Any) -> ListVersionsResponse: + if not isinstance(data, dict): + raise TypeError( + "Unmarshalling the type 'ListVersionsResponse' failed as data isn't a dictionary." 
+ ) + + args: dict[str, Any] = {} + + field = data.get("versions", None) + if field is not None: + args["versions"] = ( + [unmarshal_Version(v) for v in field] if field is not None else None + ) + else: + args["versions"] = [] + + field = data.get("total_count", None) + if field is not None: + args["total_count"] = field + else: + args["total_count"] = 0 + + return ListVersionsResponse(**args) + + +def marshal_EndpointSpecPrivateNetworkDetails( + request: EndpointSpecPrivateNetworkDetails, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.private_network_id is not None: + output["private_network_id"] = request.private_network_id + + return output + + +def marshal_EndpointSpecPublicDetails( + request: EndpointSpecPublicDetails, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + return output + + +def marshal_CreateClusterRequestVolumeSpec( + request: CreateClusterRequestVolumeSpec, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.size_bytes is not None: + output["size_bytes"] = request.size_bytes + + if request.type_ is not None: + output["type"] = request.type_ + + return output + + +def marshal_EndpointSpec( + request: EndpointSpec, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + output.update( + resolve_one_of( + [ + OneOfPossibility( + param="public_network", + value=request.public_network, + marshal_func=marshal_EndpointSpecPublicDetails, + ), + OneOfPossibility( + param="private_network", + value=request.private_network, + marshal_func=marshal_EndpointSpecPrivateNetworkDetails, + ), + ] + ), + ) + + return output + + +def marshal_CreateClusterRequest( + request: CreateClusterRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.version is not None: + output["version"] = request.version + + if request.node_amount is not None: + output["node_amount"] = request.node_amount + + if request.node_type is not None: + output["node_type"] = request.node_type + + if request.project_id is not None: + output["project_id"] = request.project_id + else: + output["project_id"] = defaults.default_project_id + + if request.name is not None: + output["name"] = request.name + + if request.tags is not None: + output["tags"] = request.tags + + if request.volume is not None: + output["volume"] = marshal_CreateClusterRequestVolumeSpec( + request.volume, defaults + ) + + if request.endpoints is not None: + output["endpoints"] = [ + marshal_EndpointSpec(item, defaults) for item in request.endpoints + ] + + if request.user_name is not None: + output["user_name"] = request.user_name + + if request.password is not None: + output["password"] = request.password + + return output + + +def marshal_CreateEndpointRequest( + request: CreateEndpointRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.cluster_id is not None: + output["cluster_id"] = request.cluster_id + + if request.endpoint is not None: + output["endpoint"] = marshal_EndpointSpec(request.endpoint, defaults) + + return output + + +def marshal_UpdateClusterRequest( + request: UpdateClusterRequest, + defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.name is not None: + output["name"] = request.name + + if request.tags is not None: + output["tags"] = request.tags + + return output + + +def marshal_UpdateUserRequest( + request: UpdateUserRequest, + 
defaults: ProfileDefaults, +) -> dict[str, Any]: + output: dict[str, Any] = {} + + if request.password is not None: + output["password"] = request.password + + return output diff --git a/scaleway/scaleway/kafka/v1alpha1/types.py b/scaleway/scaleway/kafka/v1alpha1/types.py new file mode 100644 index 000000000..b5c8e0822 --- /dev/null +++ b/scaleway/scaleway/kafka/v1alpha1/types.py @@ -0,0 +1,730 @@ +# This file was automatically generated. DO NOT EDIT. +# If you have any remark or suggestion do not hesitate to open an issue. +from __future__ import annotations + +from dataclasses import dataclass, field +from datetime import datetime +from enum import Enum +from typing import Optional + +from scaleway_core.bridge import ( + Region as ScwRegion, +) +from scaleway_core.utils import ( + StrEnumMeta, +) + + +class ClusterStatus(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STATUS = "unknown_status" + READY = "ready" + CREATING = "creating" + CONFIGURING = "configuring" + DELETING = "deleting" + ERROR = "error" + LOCKED = "locked" + STOPPED = "stopped" + + def __str__(self) -> str: + return str(self.value) + + +class ListClustersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + CREATED_AT_ASC = "created_at_asc" + CREATED_AT_DESC = "created_at_desc" + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + STATUS_ASC = "status_asc" + STATUS_DESC = "status_desc" + + def __str__(self) -> str: + return str(self.value) + + +class ListUsersRequestOrderBy(str, Enum, metaclass=StrEnumMeta): + NAME_ASC = "name_asc" + NAME_DESC = "name_desc" + + def __str__(self) -> str: + return str(self.value) + + +class NodeTypeStock(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_STOCK = "unknown_stock" + LOW_STOCK = "low_stock" + OUT_OF_STOCK = "out_of_stock" + AVAILABLE = "available" + + def __str__(self) -> str: + return str(self.value) + + +class VolumeType(str, Enum, metaclass=StrEnumMeta): + UNKNOWN_TYPE = "unknown_type" + SBS_5K = "sbs_5k" + SBS_15K = "sbs_15k" + + def __str__(self) -> str: + return str(self.value) + + +@dataclass +class EndpointPrivateNetworkDetails: + """ + Private Network details. + """ + + private_network_id: str + """ + UUID of the Private Network. + """ + + +@dataclass +class EndpointPublicDetails: + """ + Public Access details. + """ + + pass + + +@dataclass +class VersionAvailableSettingBooleanProperty: + default_value: bool + + +@dataclass +class VersionAvailableSettingFloatProperty: + min: float + max: float + default_value: float + unit: Optional[str] = None + + +@dataclass +class VersionAvailableSettingIntegerProperty: + min: int + max: int + default_value: int + unit: Optional[str] = None + + +@dataclass +class VersionAvailableSettingStringProperty: + default_value: str + string_constraint: Optional[str] = None + + +@dataclass +class EndpointSpecPrivateNetworkDetails: + private_network_id: str + """ + UUID of the Private Network. + """ + + +@dataclass +class EndpointSpecPublicDetails: + pass + + +@dataclass +class ClusterSetting: + name: str + """ + Name of the setting. + """ + + bool_value: Optional[bool] = False + + string_value: Optional[str] = None + + int_value: Optional[int] = 0 + + float_value: Optional[float] = 0.0 + + +@dataclass +class Endpoint: + id: str + """ + UUID of the endpoint. + """ + + dns_records: list[str] + """ + List of DNS records of the endpoint. + """ + + port: int + """ + TCP port of the endpoint. 
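The `Endpoint`/`EndpointSpec` pairs use a one-of pattern (cf. `resolve_one_of` in the marshalling module): at most one of the network detail fields is expected to be set. A small sketch of inspecting a returned `Endpoint` under that assumption::

    def describe_endpoint(ep: Endpoint) -> str:
        # One-of: private_network and public_network are mutually exclusive.
        if ep.private_network is not None:
            return f"private ({ep.private_network.private_network_id})"
        if ep.public_network is not None:
            return "public"
        return "unknown"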
+ """ + + private_network: Optional[EndpointPrivateNetworkDetails] = None + + public_network: Optional[EndpointPublicDetails] = None + + +@dataclass +class Volume: + type_: VolumeType + """ + Type of volume where data is stored. + """ + + size_bytes: int + """ + Volume size. + """ + + +@dataclass +class NodeTypeVolumeType: + type_: VolumeType + """ + Volume type. + """ + + description: str + """ + The description of the volume. + """ + + min_size_bytes: int + """ + Mimimum size required for the volume. + """ + + max_size_bytes: int + """ + Maximum size required for the volume. + """ + + chunk_size_bytes: int + """ + Minimum increment level for a Block Storage volume size. + """ + + +@dataclass +class VersionAvailableSetting: + name: str + """ + Kafka cluster setting name. + """ + + hot_configurable: bool + """ + Defines whether this setting can be applied without needing a restart. + """ + + description: str + """ + Setting description. + """ + + bool_property: Optional[VersionAvailableSettingBooleanProperty] = None + + string_property: Optional[VersionAvailableSettingStringProperty] = None + + int_property: Optional[VersionAvailableSettingIntegerProperty] = None + + float_property: Optional[VersionAvailableSettingFloatProperty] = None + + +@dataclass +class CreateClusterRequestVolumeSpec: + size_bytes: int + """ + Volume size. + """ + + type_: VolumeType + """ + Type of volume where data is stored. + """ + + +@dataclass +class EndpointSpec: + public_network: Optional[EndpointSpecPublicDetails] = None + + private_network: Optional[EndpointSpecPrivateNetworkDetails] = None + + +@dataclass +class Cluster: + id: str + """ + UUID of the Kafka cluster. + """ + + name: str + """ + Name of the Kafka cluster. + """ + + project_id: str + """ + Project ID the Kafka cluster belongs to. + """ + + organization_id: str + """ + Organisation ID the Kafka cluster belongs to. + """ + + status: ClusterStatus + """ + Status of the Kafka cluster. + """ + + version: str + """ + Kafka version of the Kafka cluster. + """ + + tags: list[str] + """ + List of tags applied to the Kafka cluster. + """ + + settings: list[ClusterSetting] + """ + Advanced settings of the Kafka cluster. + """ + + node_amount: int + """ + Number of nodes in Kafka cluster. + """ + + node_type: str + """ + Node type of the Kafka cluster. + """ + + endpoints: list[Endpoint] + """ + List of Kafka cluster endpoints. + """ + + region: ScwRegion + """ + Region the Kafka cluster is in. + """ + + volume: Optional[Volume] = None + """ + Volumes of the Kafka cluster. + """ + + created_at: Optional[datetime] = None + """ + Creation date (must follow the ISO 8601 format). + """ + + updated_at: Optional[datetime] = None + """ + Last update date (must follow the ISO 8601 format). + """ + + +@dataclass +class NodeType: + name: str + """ + Node type name identifier. + """ + + stock_status: NodeTypeStock + """ + Current stock status for the node type. + """ + + description: str + """ + Current specifications of the node type offer. + """ + + vcpus: int + """ + Number of virtual CPUs of the node type. + """ + + memory_bytes: int + """ + Quantity of RAM. + """ + + available_volume_types: list[NodeTypeVolumeType] + """ + Available storage options for the node type. + """ + + disabled: bool + """ + Defines whether the node type is currently disabled. + """ + + beta: bool + """ + Defines whether the node type is currently in beta. + """ + + cluster_range: str + """ + Cluster range associated with the node type offer. 
+ """ + + +@dataclass +class User: + username: str + + +@dataclass +class Version: + version: str + """ + Kafka version. + """ + + available_settings: list[VersionAvailableSetting] + """ + Cluster configuration settings you are able to change for clusters running this version. Each item in `available_settings` describes one configurable cluster setting. + """ + + end_of_life_at: Optional[datetime] = None + """ + Date of End of Life for the version. + """ + + +@dataclass +class CreateClusterRequest: + version: str + """ + Version of Kafka. + """ + + node_amount: int + """ + Number of nodes to use for the Kafka cluster. + """ + + node_type: str + """ + Type of node to use for the Kafka cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + project_id: Optional[str] = None + """ + The ID of the Project in which the Kafka cluster will be created. + """ + + name: Optional[str] = None + """ + Name of the Kafka cluster. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + Tags to apply to the Kafka cluster. + """ + + volume: Optional[CreateClusterRequestVolumeSpec] = None + """ + Kafka volume information. + """ + + endpoints: Optional[list[EndpointSpec]] = field(default_factory=list) + """ + One or multiple EndpointSpec used to expose your Kafka cluster. + """ + + user_name: Optional[str] = None + """ + Username for the kafka user. + """ + + password: Optional[str] = None + """ + Password for the kafka user. + """ + + +@dataclass +class CreateEndpointRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + endpoint: EndpointSpec + """ + Endpoint object (`EndpointSpec`) used to expose your Kafka EndpointSpec. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class DeleteClusterRequest: + cluster_id: str + """ + UUID of the Kafka Cluster to delete. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class DeleteEndpointRequest: + endpoint_id: str + """ + UUID of the endpoint to delete. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class GetClusterCertificateAuthorityRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class GetClusterRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class ListClustersRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + List Kafka cluster with a given tag. + """ + + name: Optional[str] = None + """ + Lists Kafka clusters that match a name pattern. + """ + + order_by: Optional[ListClustersRequestOrderBy] = ( + ListClustersRequestOrderBy.CREATED_AT_ASC + ) + """ + Criteria to use when ordering Kafka cluster listings. + """ + + organization_id: Optional[str] = None + """ + Organization ID of the Kafka cluster. 
+ """ + + project_id: Optional[str] = None + """ + Project ID. + """ + + page: Optional[int] = 0 + page_size: Optional[int] = 0 + + +@dataclass +class ListClustersResponse: + clusters: list[Cluster] + """ + List of all Kafka cluster available in an Organization or Project. + """ + + total_count: int + """ + Total count of Kafka cluster available in an Organization or Project. + """ + + +@dataclass +class ListNodeTypesRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + include_disabled_types: Optional[bool] = False + """ + Defines whether or not to include disabled types. + """ + + page: Optional[int] = 0 + page_size: Optional[int] = 0 + + +@dataclass +class ListNodeTypesResponse: + node_types: list[NodeType] + """ + Types of the node. + """ + + total_count: int + """ + Total count of node types available. + """ + + +@dataclass +class ListUsersRequest: + cluster_id: str + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + page: Optional[int] = None + page_size: Optional[int] = None + order_by: Optional[ListUsersRequestOrderBy] = None + name: Optional[str] = None + + +@dataclass +class ListUsersResponse: + users: list[User] + total_count: int + + +@dataclass +class ListVersionsRequest: + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + version: Optional[str] = None + """ + Kafka version to filter for. + """ + + page: Optional[int] = 0 + """ + The page number to return, from the paginated results. + """ + + page_size: Optional[int] = 0 + """ + The number of items to return. + """ + + +@dataclass +class ListVersionsResponse: + versions: list[Version] + """ + Available Kafka versions. + """ + + total_count: int + """ + Total count of Kafka versions available. + """ + + +@dataclass +class RenewClusterCertificateAuthorityRequest: + cluster_id: str + """ + UUID of the Kafka Cluster. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + +@dataclass +class UpdateClusterRequest: + cluster_id: str + """ + UUID of the Kafka Clusters to update. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + name: Optional[str] = None + """ + Name of the Kafka Cluster. + """ + + tags: Optional[list[str]] = field(default_factory=list) + """ + Tags of a Kafka Cluster. + """ + + +@dataclass +class UpdateUserRequest: + """ + Update a user of a Kafka cluster. + """ + + cluster_id: str + """ + ID of the cluster in which to update the user's password. + """ + + username: str + """ + Username of the Kafka cluster user. + """ + + region: Optional[ScwRegion] = None + """ + Region to target. If none is passed will use default region from the config. + """ + + password: Optional[str] = None + """ + New password for the Kafka cluster user. + """